/pysnmp-with-texts/WWP-LEOS-TABLE-CHG-NOTIF-MIB.py | agustinhenze/mibs.snmplabs.com | Apache-2.0; LicenseRef-scancode-warranty-disclaimer; LicenseRef-scancode-proprietary-license; LicenseRef-scancode-unknown-license-reference (permissive) | Python | UTF-8 | 12,414 bytes
#
# PySNMP MIB module WWP-LEOS-TABLE-CHG-NOTIF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/WWP-LEOS-TABLE-CHG-NOTIF-MIB
# Produced by pysmi-0.3.4 at Wed May 1 15:38:29 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
AddressFamilyNumbers, = mibBuilder.importSymbols("IANA-ADDRESS-FAMILY-NUMBERS-MIB", "AddressFamilyNumbers")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
Counter64, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, MibIdentifier, Integer32, ModuleIdentity, NotificationType, iso, Bits, Gauge32, Counter32, TimeTicks, IpAddress = mibBuilder.importSymbols("SNMPv2-SMI", "Counter64", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "MibIdentifier", "Integer32", "ModuleIdentity", "NotificationType", "iso", "Bits", "Gauge32", "Counter32", "TimeTicks", "IpAddress")
RowStatus, TruthValue, MacAddress, DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "RowStatus", "TruthValue", "MacAddress", "DisplayString", "TextualConvention")
wwpModulesLeos, wwpModules = mibBuilder.importSymbols("WWP-SMI", "wwpModulesLeos", "wwpModules")
wwpLeosTableChgNotifMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9))
wwpLeosTableChgNotifMIB.setRevisions(('2002-03-12 00:00',))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
if mibBuilder.loadTexts: wwpLeosTableChgNotifMIB.setRevisionsDescriptions(('Initial creation.',))
if mibBuilder.loadTexts: wwpLeosTableChgNotifMIB.setLastUpdated('200203120000Z')
if mibBuilder.loadTexts: wwpLeosTableChgNotifMIB.setOrganization('World Wide Packets, Inc')
if mibBuilder.loadTexts: wwpLeosTableChgNotifMIB.setContactInfo(' Mib Meister Postal: World Wide Packets P.O. Box 950 Veradale, WA 99037 USA Phone: +1 509 242 9000 Email: [email protected]')
if mibBuilder.loadTexts: wwpLeosTableChgNotifMIB.setDescription('This MIB module is used to maintain the table of (Table, Ems IP)')
wwpLeosTableChgNotifMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1))
wwpLeosTableChgNotif = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 7))
wwpLeosTableChgNotifMIBNotificationPrefix = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 2))
wwpLeosTableChgNotifMIBNotifications = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 2, 0))
wwpLeosTableChgNotifMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 3))
wwpLeosTableChgNotifMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 3, 1))
wwpLeosTableChgNotifMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 3, 2))
wwpLeosTableTrapNotifTimer = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 3600))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosTableTrapNotifTimer.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableTrapNotifTimer.setDescription('Setting this value will set the trap timer. Device will send the trap notification after every wwpLeosTableTrapNotifTimer seconds if any of the table changes.')
wwpLeosListenerRefreshTimer = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1000))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: wwpLeosListenerRefreshTimer.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerRefreshTimer.setDescription("Setting this value will set the listener refresh timer. Device will send the trap notification 'wwpLeosTableRefreshTrap' after every 'wwpLeosListenerRefreshTimer' seconds if count value associated with 'wwpLeosListenerEntry' becomes 20 or 10 or 5. Device will delete entry from wwpLeosListenerEntry once count = 0. Minimum value is 30 sec and maximum = 1000 seconds.")
wwpLeosTableChgNotifTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 4), )
if mibBuilder.loadTexts: wwpLeosTableChgNotifTable.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifTable.setDescription('Table of Ems.')
wwpLeosTableChgNotifEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 4, 1), ).setIndexNames((0, "WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosTableChgNotifIndex"))
if mibBuilder.loadTexts: wwpLeosTableChgNotifEntry.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifEntry.setDescription('Ems Entry in the ems Port Table.')
wwpLeosTableChgNotifIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 100))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosTableChgNotifIndex.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifIndex.setDescription('This is the index used for this table.')
wwpLeosTableChgNotifOid = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 4, 1, 2), ObjectIdentifier()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosTableChgNotifOid.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifOid.setDescription('This is the OID of the table for which device will send trap if anything changed in this table.')
wwpLeosTableChgNotifNumChanges = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 4, 1, 3), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosTableChgNotifNumChanges.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifNumChanges.setDescription('This variables returns the counter value which keeps track of how many times wwpLeosTableChgNotifOid has changed. This counter is 32 bit counter and will never reset, except if device is rebooted.')
wwpLeosListenerTable = MibTable((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5), )
if mibBuilder.loadTexts: wwpLeosListenerTable.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerTable.setDescription("Table of EMS/NMS Ip's. This table uses Multiple Set operation to create entry in the table.")
wwpLeosListenerEntry = MibTableRow((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5, 1), ).setIndexNames((0, "WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosListenerIndex"))
if mibBuilder.loadTexts: wwpLeosListenerEntry.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerEntry.setDescription('Entry of each EMS/NMS who is interested to receive table change notification.')
wwpLeosListenerIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 10))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosListenerIndex.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerIndex.setDescription('Specifies the unique index in the wwpLeosListenerTable.')
wwpLeosListenerAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5, 1, 2), DisplayString()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosListenerAddr.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerAddr.setDescription('This is the host name or ip address of the EMS/NMS.')
wwpLeosListenerResolvedIp = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5, 1, 3), IpAddress()).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosListenerResolvedIp.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerResolvedIp.setDescription('This shows the resolved IP address of the host name specified in wwpLeosListenerAddr. If wwpLeosListenerAddr is set to IpV4 type then wwpLeosListenerResolvedIp is equal to wwpLeosListenerAddr. If wwpLeosListenerAddr is set to dns type then wwpLeosListenerResolvedIp is equal to 0 if it is unresolved or is equal to resolved IP address.')
wwpLeosListenerStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 5, 1, 4), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: wwpLeosListenerStatus.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerStatus.setDescription("Used to manage the creation and deletion of the conceptual rows in this table. To create a row in this table, a manager must set this object to 'createAndGo'. Setting this object to 'active' if it is already active will result in refreshing this entry. Setting this object to 'create and go' if this entry is already active will result in refreshing this entry. This table uses Multiple Set operation to create entry in the table. wwpLeosListenerAddr and wwpLeosListenerStatus and index are mandatory objects that need to be set when creating entry. While deleting entry ony index needs to be specified. ")
wwpLeosTableChgNotifIndexStr = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 7, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 255))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosTableChgNotifIndexStr.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgNotifIndexStr.setDescription("This variable contains the string of indexes that changed corresponding to table wwpLeosTableChgNotifOid. This variable is sent in the trap 'wwpLeosTableChgTrap'.")
wwpLeosListenerRefreshCount = MibScalar((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 1, 7, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(30, 1000))).setMaxAccess("readonly")
if mibBuilder.loadTexts: wwpLeosListenerRefreshCount.setStatus('current')
if mibBuilder.loadTexts: wwpLeosListenerRefreshCount.setDescription('This is the count value for each listener entry. when this count value becomes zero, the listener entry corresponding to this value will be deleted. This count value is sent in the trap.')
wwpLeosTableChgTrap = NotificationType((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 2, 0, 1)).setObjects(("WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosTableChgNotifOid"), ("WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosTableChgNotifNumChanges"), ("WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosTableChgNotifIndexStr"))
if mibBuilder.loadTexts: wwpLeosTableChgTrap.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableChgTrap.setDescription('A notification is sent whenever the table has changed. OID of the table and the counter associated with this table is sent in the trap.')
wwpLeosTableRefreshTrap = NotificationType((1, 3, 6, 1, 4, 1, 6141, 2, 60, 9, 2, 0, 2)).setObjects(("WWP-LEOS-TABLE-CHG-NOTIF-MIB", "wwpLeosListenerRefreshCount"))
if mibBuilder.loadTexts: wwpLeosTableRefreshTrap.setStatus('current')
if mibBuilder.loadTexts: wwpLeosTableRefreshTrap.setDescription('A notification is sent whenever the listener table entry needs to be refreshed.')
mibBuilder.exportSymbols("WWP-LEOS-TABLE-CHG-NOTIF-MIB", wwpLeosTableChgNotifTable=wwpLeosTableChgNotifTable, wwpLeosTableChgNotifMIBObjects=wwpLeosTableChgNotifMIBObjects, wwpLeosTableChgNotifEntry=wwpLeosTableChgNotifEntry, wwpLeosTableRefreshTrap=wwpLeosTableRefreshTrap, wwpLeosTableChgNotif=wwpLeosTableChgNotif, wwpLeosTableChgNotifMIBNotificationPrefix=wwpLeosTableChgNotifMIBNotificationPrefix, wwpLeosTableChgNotifMIB=wwpLeosTableChgNotifMIB, wwpLeosListenerResolvedIp=wwpLeosListenerResolvedIp, wwpLeosTableChgNotifMIBGroups=wwpLeosTableChgNotifMIBGroups, wwpLeosTableChgNotifMIBConformance=wwpLeosTableChgNotifMIBConformance, wwpLeosTableChgNotifOid=wwpLeosTableChgNotifOid, wwpLeosTableChgNotifMIBCompliances=wwpLeosTableChgNotifMIBCompliances, wwpLeosTableTrapNotifTimer=wwpLeosTableTrapNotifTimer, wwpLeosListenerAddr=wwpLeosListenerAddr, wwpLeosTableChgNotifIndexStr=wwpLeosTableChgNotifIndexStr, wwpLeosListenerRefreshTimer=wwpLeosListenerRefreshTimer, wwpLeosTableChgNotifNumChanges=wwpLeosTableChgNotifNumChanges, wwpLeosListenerRefreshCount=wwpLeosListenerRefreshCount, wwpLeosTableChgTrap=wwpLeosTableChgTrap, wwpLeosListenerTable=wwpLeosListenerTable, wwpLeosListenerIndex=wwpLeosListenerIndex, wwpLeosTableChgNotifIndex=wwpLeosTableChgNotifIndex, wwpLeosListenerEntry=wwpLeosListenerEntry, wwpLeosListenerStatus=wwpLeosListenerStatus, PYSNMP_MODULE_ID=wwpLeosTableChgNotifMIB, wwpLeosTableChgNotifMIBNotifications=wwpLeosTableChgNotifMIBNotifications)
author: [email protected]
/EL266.py | Pumafied/Project-Euler | no_license | Python | UTF-8 | 364 bytes
# The divisors of 12 are: 1,2,3,4,6 and 12.
# The largest divisor of 12 that does not exceed the square root of 12 is 3.
# We shall call the largest divisor of an integer n that does not exceed the square root of n the pseudo square root (PSR) of n.
# It can be seen that PSR(3102)=47.
# Let p be the product of the primes below 190.
# Find PSR(p) mod 10^16.
author: [email protected]
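# Illustrative sketch (hypothetical, not part of the original EL266.py): a
# brute-force PSR for small n, only to make the definition above concrete.
# It will not scale to the actual problem, where p is the product of the
# primes below 190.
import math

def psr_bruteforce(n):
    """Largest divisor of n that does not exceed sqrt(n) (small n only)."""
    for d in range(int(math.sqrt(n)), 0, -1):
        if n % d == 0:
            return d

assert psr_bruteforce(12) == 3
assert psr_bruteforce(3102) == 47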
/fizeau_control_loop.py | mwanakijiji/lbti_fizeau_control | no_license | Python | UTF-8 | 2,021 bytes
#!/usr/bin/python
from lmircam_tools import *
from lmircam_tools.overlap_psfs import overlap_airy_psfs, overlap_grism_psfs
from lmircam_tools.dial_opd import optimize_opd_fizeau_grism, optimize_opd_fizeau_airy
from lmircam_tools.change_tt import optimize_tt_fizeau_airy
############## BEGIN GROSS OVERLAP OF NON-FIZEAU AIRY PSFS
psf_loc_setpoint = [1220,800] # pixel location for PSFs to be at
overlap_airy_psfs(psf_loc_setpoint) # filter-agnostic
############## END GROSS OVERLAP OF AIRY PSFS
############## BEGIN PUT IN GRISM AND REFINE GRISM-PSF OVERLAP
put_in_grism()
overlap_grism_psfs(psf_loc_setpoint)
############## END PUT IN GRISM AND REFINE GRISM-PSF OVERLAP
############## BEGIN DIAL OPD WITH HPC AND FIND CENTER OF COHERENCE ENVELOPE, THEN REMOVE GRISM
optimize_opd_fizeau_grism(psf_loc_setpoint) # might also use argument of the re-established Fizeau/grism PSF instead of the coordinate where it's supposed to be
remove_grism()
############## END DIAL OPD WITH HPC AND FIND CENTER OF COHERENCE ENVELOPE, THEN REMOVE GRISM
############## BEGIN HOLD CENTER OF SCIENCE COHERENCE ENVELOPE WITH HIGH-CONTRAST FRINGES
# maybe allow HPC to correct pathlength using science readouts, even though Phasecam not closed yet? or does this not have to be automatic, and we can just correct the Fizeau/Airy PSFs once Phasecam is closed?
############## END HOLD CENTER OF SCIENCE COHERENCE ENVELOPE WITH HIGH-CONTRAST FRINGES
############## TRANSLATE NIL + CLOSE PHASECAM LOOP HERE?
# might be manual step
############## BEGIN OPTIMIZE SCIENCE PSF BY FINDING OPD AND TT SETPOINTS ITERATIVELY
optimize_opd_fizeau_airy(psf_location)
optimize_tt_fizeau_airy(psf_location)
## adjust TT to optimize PSF; maybe iterate with OPD?
## note OPD movements cannot be more than 5 um with Phasecam closed
############## END OPTIMIZE SCIENCE PSF BY FINDING OPD AND TT SETPOINTS ITERATIVELY
############## ANY RUNNING STATS I WANT TO KEEP OF THE SCIENCE PSFS?
############## REDO EVERYTHING ONCE NOD HAPPENS
author: [email protected]
/tests/test_text.py | mvwicky/holdmypics | MIT (permissive) | Python | UTF-8 | 5,151 bytes
from __future__ import annotations
import imghdr
import io
import os
import time
from collections.abc import Callable
from typing import TYPE_CHECKING
from urllib.parse import urlencode
import pytest
from flask.testing import FlaskClient
from hypothesis import example, given, strategies as st
from loguru import logger
from PIL import Image
from tests.strategies import (
color_strategy,
dpi_strategy,
fmt_strategy,
opt_color_strategy,
size_strategy,
)
from tests.utils import compact_dict, make_route, size_id
if TYPE_CHECKING:
from holdmypics import Holdmypics
char_strategy = st.characters(blacklist_categories=("Cc", "Cf", "Cs", "Co", "Cn"))
text_strategy = st.text(min_size=1, max_size=255, alphabet=char_strategy)
long_text_strategy = st.text(min_size=16, max_size=255, alphabet=char_strategy)
opt_text_strategy = st.one_of(st.none(), text_strategy)
args_strategy = st.fixed_dictionaries({"text": opt_text_strategy, "dpi": dpi_strategy})
def make_args(**kwargs: str | int | None):
from holdmypics.api.args import TextImageArgs
return TextImageArgs(**compact_dict(kwargs))
@given(
size=size_strategy,
img_fmt=fmt_strategy,
fg=color_strategy,
bg=color_strategy,
args=args_strategy,
)
@example(
size=(1920, 1080),
img_fmt="png",
fg="fff",
bg="000",
args={"text": "Some Text", "dpi": 300},
)
def test_create_images_using_function(
app_factory: Callable[[], Holdmypics],
size: tuple[int, int],
img_fmt: str,
fg: str,
bg: str,
args: dict[str, str | int | None],
):
from holdmypics.api.text import GeneratedTextImage
start = time.perf_counter()
with app_factory().test_request_context():
img_args = make_args(**args)
img = GeneratedTextImage(size, img_fmt, bg, fg, img_args)
assert img.get_save_kw()
p = img.get_path()
assert os.path.isfile(p)
assert os.path.getsize(p)
im = Image.open(p)
assert im.size == size
logger.debug("Elapsed: {0:.4f}", time.perf_counter() - start)
@given(
size=size_strategy,
img_fmt=fmt_strategy,
fg=opt_color_strategy,
bg=opt_color_strategy,
args=args_strategy,
)
def test_create_images_using_client(
app_factory: Callable[[], Holdmypics],
size: tuple[int, int],
img_fmt: str,
fg: str | None,
bg: str | None,
args: dict[str, str | int | None],
):
if bg is None and fg:
bg, fg = fg, None
start = time.perf_counter()
app = app_factory()
with app.test_client() as client:
url = make_route(
app,
"api.image_route",
size=size,
bg_color=bg,
fg_color=fg,
fmt=img_fmt,
**compact_dict(args),
)
# if args:
# url = "?".join((url, urlencode(compact_dict(args))))
res = client.get(url, follow_redirects=False)
assert res.status_code == 200
img_type = imghdr.what("filename", h=res.data)
assert img_type == img_fmt
im = Image.open(io.BytesIO(res.data))
assert im.size == size
logger.debug("Elapsed: {0:.4f}", time.perf_counter() - start)
def test_random_text_header(client: FlaskClient):
path = make_route(
client,
"api.image_route",
size=(638, 328),
bg_color="cef",
fg_color="555",
fmt="png",
random_text=True,
)
res = client.get(path, follow_redirects=False)
assert res.status_code == 200
assert "X-Random-Text" in res.headers
def test_random_text_ocr(client: FlaskClient):
pytesseract = pytest.importorskip("pytesseract", reason="pytesseract not installed")
path = make_route(
client,
"api.image_route",
size=(638, 328),
bg_color="cef",
fg_color="555",
fmt="png",
)
args = {"text": "Some Random Text", "dpi": None, "random_text": True}
query = urlencode({k: v for (k, v) in args.items() if v})
url = "?".join((path, query))
res = client.get(url, follow_redirects=False)
assert res.status_code == 200
img_type = imghdr.what("filename", h=res.data)
assert img_type == "png"
im = Image.open(io.BytesIO(res.data))
from_header = res.headers.get("X-Random-Text")
assert from_header is not None
from_ocr = pytesseract.image_to_string(im).strip()
logger.info("Got text from OCR: {0}", from_ocr)
assert from_ocr.casefold() == from_header.casefold()
@pytest.mark.parametrize(
"font_name", ["overpass", "fira-mono", "fira-sans", "roboto", "spectral"]
)
@pytest.mark.parametrize("size", [(3840, 2160), (960, 540)], ids=size_id)
def test_text_with_fonts(
app: Holdmypics, image_format: str, font_name: str, size: tuple[int, int]
):
from holdmypics.api.text import GeneratedTextImage
with app.test_request_context():
img_args = make_args(text=f"Text with font: {font_name}", font_name=font_name)
img = GeneratedTextImage(size, image_format, "cef", "555", img_args)
assert img.get_save_kw()
p = img.get_path()
assert os.path.isfile(p)
assert os.path.getsize(p)
author: [email protected]
/mnist_data/mnist_app.py | csliuchang/tensorflow_project | no_license | Python | UTF-8 | 3,147 bytes
# coding utf-8
import tensorflow as tf
import numpy as np
from PIL import Image
import mnist_backward
import mnist_forward
# from .mnist_backward import MODEL_SAVE_PATH
def restore_model(testPicArr):
    """
    Create a default graph and run the following operations inside it.

    args:
        MOVING_AVERAGE_DECAY: controls how quickly the model updates; during training
            a shadow variable is maintained for every variable, initialized to the
            variable's initial value and updated each time the variable is updated.
        preValue: tf.argmax over axis 1 returns the index of the largest value in each
            row, i.e. the prediction with the highest probability.
        variables_to_restore: the variables_to_restore function maps the shadow
            variables directly onto the variables themselves when the model is loaded,
            so fetching a variable's moving average only requires reading the variable
            itself rather than its shadow variable.
    """
with tf.Graph().as_default() as tg:
x = tf.placeholder(tf.float32, [None, mnist_forward.INPUT_NODE])
y = mnist_forward.forward(x, None)
preValue = tf.argmax(y, 1)
variable_averages = tf.train.ExponentialMovingAverage(mnist_backward.MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
MODEL_SAVE_PATH = "./model/"
with tf.Session() as sess:
ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
preValue = sess.run(preValue, feed_dict={x: testPicArr})
return preValue
else:
print("No checkpoint file found")
return -1
def pre_pic(picName):
"""
    ANTIALIAS: anti-aliasing resampling filter
    convert('L'): convert the image to grayscale
    threshold: binarization threshold
"""
img = Image.open(picName)
reIm = img.resize((28, 28), Image.ANTIALIAS)
im_arr = np.array(reIm.convert('L'))
threshold = 50
for i in range(28):
for j in range(28):
im_arr[i][j] = 255 - im_arr[i][j]
if (im_arr[i][j] < threshold):
                im_arr[i][j] = 0  # pure black (0)
else: im_arr[i][j] = 255
nm_arr = im_arr.reshape([1, 784])
nm_arr = nm_arr.astype(np.float32)
img_ready = np.multiply(nm_arr, 1.0/255.0)
return img_ready
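# pre_pic() inverts the grayscale values (255 - pixel) and then thresholds them
# because MNIST training digits are white strokes on a black background, whereas
# a photographed or scanned digit is usually dark on a light background.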
def application():
# testNum = input("input the number of test pictures:")
# for i in range(testNum):
# testPic = raw_input("the path of test picture:")
# testPicArr = pre_pic('./1.png')
# preValue = restore_model(testPicArr)
# print("The prediction number is", preValue)
# testPicArr = pre_pic('./2.png')
preValue = restore_model(pre_pic(raw_input("the path of test picture :")))
print("The prediction number is ", preValue)
def main():
application()
if __name__ == "__main__":
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
    main()
author: [email protected]
/viskit/frontend.py | brandontrabucco/viskit | no_license | Python | UTF-8 | 30,527 bytes
import sys
from viskit.core import AttrDict
sys.path.append('.')
import matplotlib
import os
matplotlib.use('Agg')
import flask # import Flask, render_template, send_from_directory
from viskit import core
import sys
import argparse
import json
import numpy as np
from plotly import tools
import plotly.offline as po
import plotly.graph_objs as go
def flatten(xs):
return [x for y in xs for x in y]
def sliding_mean(data_array, window=5):
data_array = np.array(data_array)
new_list = []
for i in range(len(data_array)):
indices = list(range(max(i - window + 1, 0),
min(i + window + 1, len(data_array))))
avg = 0
for j in indices:
avg += data_array[j]
avg /= float(len(indices))
new_list.append(avg)
return np.array(new_list)
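# The averaging window in sliding_mean() is not centered: output point i averages
# indices i - window + 1 through i + window, clipped to the array bounds, so e.g.
# sliding_mean([0., 4., 8.], window=1) gives [2., 6., 8.].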
import itertools
app = flask.Flask(__name__, static_url_path='/static')
exps_data = None
plottable_keys = None
distinct_params = None
@app.route('/js/<path:path>')
def send_js(path):
return flask.send_from_directory('js', path)
@app.route('/css/<path:path>')
def send_css(path):
return flask.send_from_directory('css', path)
def make_plot(
plot_lists,
use_median=False,
plot_width=None,
plot_height=None,
title=None,
):
"""
plot_lists is a list of lists.
    Each element of the outer list corresponds to one y-axis attribute.
    Each inner list holds the experiments to plot for that y-axis
    attribute.
Each plot is an AttrDict which should have the elements used below.
"""
p25, p50, p75 = [], [], []
num_y_axes = len(plot_lists)
fig = tools.make_subplots(rows=num_y_axes, cols=1, print_grid=False)
fig['layout'].update(
width=plot_width,
height=plot_height,
title=title,
)
for y_idx, plot_list in enumerate(plot_lists):
for idx, plt in enumerate(plot_list):
color = core.color_defaults[idx % len(core.color_defaults)]
if use_median:
p25.append(np.mean(plt.percentile25))
p50.append(np.mean(plt.percentile50))
p75.append(np.mean(plt.percentile75))
x = list(range(len(plt.percentile50)))
y = list(plt.percentile50)
y_upper = list(plt.percentile75)
y_lower = list(plt.percentile25)
else:
x = list(range(len(plt.means)))
y = list(plt.means)
y_upper = list(plt.means + plt.stds)
y_lower = list(plt.means - plt.stds)
errors = go.Scatter(
x=x + x[::-1],
y=y_upper + y_lower[::-1],
fill='tozerox',
fillcolor=core.hex_to_rgb(color, 0.2),
line=go.scatter.Line(color=core.hex_to_rgb(color, 0)),
showlegend=False,
legendgroup=plt.legend,
hoverinfo='none'
)
values = go.Scatter(
x=x,
y=y,
name=plt.legend,
legendgroup=plt.legend,
line=dict(color=core.hex_to_rgb(color)),
)
# plotly is 1-indexed like matplotlib for subplots
y_idx_plotly = y_idx + 1
fig.append_trace(values, y_idx_plotly, 1)
fig.append_trace(errors, y_idx_plotly, 1)
fig['layout']['yaxis{}'.format(y_idx_plotly)].update(
title=plt.plot_key,
)
fig_div = po.plot(fig, output_type='div', include_plotlyjs=False)
if "footnote" in plot_list[0]:
footnote = "<br />".join([
r"<span><b>%s</b></span>: <span>%s</span>" % (
plt.legend, plt.footnote)
for plt in plot_list
])
return r"%s<div>%s</div>" % (fig_div, footnote)
else:
return fig_div
def make_plot_eps(plot_list, use_median=False, counter=0):
import matplotlib.pyplot as _plt
f, ax = _plt.subplots(figsize=(8, 5))
for idx, plt in enumerate(plot_list):
color = core.color_defaults[idx % len(core.color_defaults)]
if use_median:
x = list(range(len(plt.percentile50)))
y = list(plt.percentile50)
y_upper = list(plt.percentile75)
y_lower = list(plt.percentile25)
else:
x = list(range(len(plt.means)))
y = list(plt.means)
y_upper = list(plt.means + plt.stds)
y_lower = list(plt.means - plt.stds)
plt.legend = plt.legend.replace('rllab.algos.trpo.TRPO', 'TRPO')
plt.legend = plt.legend.replace('rllab.algos.vpg.VPG', 'REINFORCE')
plt.legend = plt.legend.replace('rllab.algos.erwr.ERWR', 'ERWR')
plt.legend = plt.legend.replace('sandbox.rein.algos.trpo_vime.TRPO',
'TRPO+VIME')
plt.legend = plt.legend.replace('sandbox.rein.algos.vpg_vime.VPG',
'REINFORCE+VIME')
plt.legend = plt.legend.replace('sandbox.rein.algos.erwr_vime.ERWR',
'ERWR+VIME')
plt.legend = plt.legend.replace('0.0001', '1e-4')
# plt.legend = plt.legend.replace('0.001', 'TRPO+VIME')
# plt.legend = plt.legend.replace('0', 'TRPO')
# plt.legend = plt.legend.replace('0.005', 'TRPO+L2')
if idx == 0:
plt.legend = 'TRPO (0.0)'
if idx == 1:
plt.legend = 'TRPO+VIME (103.7)'
if idx == 2:
plt.legend = 'TRPO+L2 (0.0)'
ax.fill_between(
x, y_lower, y_upper, interpolate=True, facecolor=color,
linewidth=0.0, alpha=0.3)
if idx == 2:
ax.plot(x, y, color=color, label=plt.legend, linewidth=2.0,
linestyle="--")
else:
ax.plot(x, y, color=color, label=plt.legend, linewidth=2.0)
ax.grid(True)
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
if counter == 1:
# ax.set_xlim([0, 120])
ax.set_ylim([-3, 60])
# ax.set_xlim([0, 80])
loc = 'upper left'
elif counter == 2:
ax.set_ylim([-0.04, 0.4])
# ax.set_ylim([-0.1, 0.4])
ax.set_xlim([0, 2000])
loc = 'upper left'
elif counter == 3:
# ax.set_xlim([0, 1000])
loc = 'lower right'
elif counter == 4:
# ax.set_xlim([0, 800])
# ax.set_ylim([0, 2])
loc = 'lower right'
leg = ax.legend(loc=loc, prop={'size': 12}, ncol=1)
for legobj in leg.legendHandles:
legobj.set_linewidth(5.0)
def y_fmt(x, y):
return str(int(np.round(x / 1000.0))) + 'K'
import matplotlib.ticker as tick
# ax.xaxis.set_major_formatter(tick.FuncFormatter(y_fmt))
_plt.savefig('tmp' + str(counter) + '.pdf', bbox_inches='tight')
def summary_name(exp, selector=None):
# if selector is not None:
# exclude_params = set([x[0] for x in selector._filters])
# else:
# exclude_params = set()
# rest_params = set([x[0] for x in distinct_params]).difference(exclude_params)
# if len(rest_params) > 0:
# name = ""
# for k in rest_params:
# name += "%s=%s;" % (k.split(".")[-1], str(exp.flat_params.get(k, "")).split(".")[-1])
# return name
return exp.params["exp_name"]
def check_nan(exp):
return all(
not np.any(np.isnan(vals)) for vals in list(exp.progress.values()))
def get_plot_instruction(
plot_keys,
split_keys=None,
group_keys=None,
best_filter_key=None,
filters=None,
exclusions=None,
use_median=False,
only_show_best=False,
best_based_on_final=False,
gen_eps=False,
only_show_best_sofar=False,
best_is_lowest=False,
clip_plot_value=None,
plot_width=None,
plot_height=None,
filter_nan=False,
smooth_curve=False,
custom_filter=None,
legend_post_processor=None,
normalize_error=False,
custom_series_splitter=None,
):
"""
A custom filter might look like
"lambda exp: exp.flat_params['algo_params_base_kwargs.batch_size'] == 64"
"""
if filter_nan:
nonnan_exps_data = list(filter(check_nan, exps_data))
selector = core.Selector(nonnan_exps_data)
else:
selector = core.Selector(exps_data)
if legend_post_processor is None:
legend_post_processor = lambda x: x
if filters is None:
filters = dict()
if exclusions is None:
exclusions = []
if split_keys is None:
split_keys = []
if group_keys is None:
group_keys = []
if plot_height is None:
plot_height = 300 * len(plot_keys)
for k, v in filters.items():
selector = selector.where(k, str(v))
for k, v in exclusions:
selector = selector.where_not(k, str(v))
if custom_filter is not None:
selector = selector.custom_filter(custom_filter)
if len(split_keys) > 0:
split_selectors, split_titles = split_by_keys(
selector, split_keys, distinct_params
)
else:
split_selectors = [selector]
split_titles = ["Plot"]
plots = []
counter = 1
print("Plot_keys:", plot_keys)
print("split_keys:", split_keys)
print("group_keys:", group_keys)
print("filters:", filters)
print("exclusions:", exclusions)
for split_selector, split_title in zip(split_selectors, split_titles):
if custom_series_splitter is not None:
exps = split_selector.extract()
splitted_dict = dict()
for exp in exps:
key = custom_series_splitter(exp)
if key not in splitted_dict:
splitted_dict[key] = list()
splitted_dict[key].append(exp)
splitted = list(splitted_dict.items())
group_selectors = [core.Selector(list(x[1])) for x in splitted]
group_legends = [x[0] for x in splitted]
else:
if len(group_keys) > 0:
group_selectors, group_legends = split_by_keys(
split_selector, group_keys, distinct_params
)
else:
group_selectors = [split_selector]
group_legends = [split_title]
list_of_list_of_plot_dicts = []
for plot_key in plot_keys:
to_plot = []
for group_selector, group_legend in zip(group_selectors, group_legends):
filtered_data = group_selector.extract()
if len(filtered_data) == 0:
continue
if (best_filter_key
and best_filter_key not in group_keys
and best_filter_key not in split_keys):
selectors = split_by_key(
group_selector, best_filter_key, distinct_params
)
scores = [
get_selector_score(plot_key, selector, use_median, best_based_on_final)
for selector in selectors
]
if np.isfinite(scores).any():
if best_is_lowest:
best_idx = np.nanargmin(scores)
else:
best_idx = np.nanargmax(scores)
best_selector = selectors[best_idx]
filtered_data = best_selector.extract()
print("For split '{0}', group '{1}':".format(
split_title,
group_legend,
))
print(" best '{0}': {1}".format(
best_filter_key,
dict(best_selector._filters)[best_filter_key]
))
if only_show_best or only_show_best_sofar:
# Group by seed and sort.
# -----------------------
filtered_params = core.extract_distinct_params(
filtered_data, l=0)
filtered_params2 = [p[1] for p in filtered_params]
filtered_params_k = [p[0] for p in filtered_params]
product_space = list(itertools.product(
*filtered_params2
))
data_best_regret = None
best_regret = np.inf if best_is_lowest else -np.inf
kv_string_best_regret = None
for idx, params in enumerate(product_space):
selector = core.Selector(exps_data)
for k, v in zip(filtered_params_k, params):
selector = selector.where(k, str(v))
data = selector.extract()
if len(data) > 0:
progresses = [
exp.progress.get(plot_key, np.array([np.nan]))
for exp in data
]
sizes = list(map(len, progresses))
max_size = max(sizes)
progresses = [
np.concatenate(
[ps, np.ones(max_size - len(ps)) * np.nan])
for ps in progresses]
if best_based_on_final:
progresses = np.asarray(progresses)[:, -1]
if only_show_best_sofar:
if best_is_lowest:
progresses = np.min(np.asarray(progresses),
axis=1)
else:
progresses = np.max(np.asarray(progresses),
axis=1)
if use_median:
medians = np.nanmedian(progresses, axis=0)
regret = np.mean(medians)
else:
means = np.nanmean(progresses, axis=0)
regret = np.mean(means)
distinct_params_k = [p[0] for p in distinct_params]
distinct_params_v = [
v for k, v in zip(filtered_params_k, params) if
k in distinct_params_k]
distinct_params_kv = [
(k, v) for k, v in
zip(distinct_params_k, distinct_params_v)]
distinct_params_kv_string = str(
distinct_params_kv).replace('), ', ')\t')
print(
'{}\t{}\t{}'.format(regret, len(progresses),
distinct_params_kv_string))
if best_is_lowest:
change_regret = regret < best_regret
else:
change_regret = regret > best_regret
if change_regret:
best_regret = regret
best_progress = progresses
data_best_regret = data
kv_string_best_regret = distinct_params_kv_string
print(group_selector._filters)
print('best regret: {}'.format(best_regret))
# -----------------------
if np.isfinite(best_regret):
progresses = [
exp.progress.get(plot_key, np.array([np.nan])) for
exp in data_best_regret]
# progresses = [progress[:500] for progress in progresses ]
sizes = list(map(len, progresses))
# more intelligent:
max_size = max(sizes)
progresses = [
np.concatenate(
[ps, np.ones(max_size - len(ps)) * np.nan]) for
ps in progresses]
legend = '{} (mu: {:.3f}, std: {:.5f})'.format(
group_legend, best_regret, np.std(best_progress))
window_size = np.maximum(
int(np.round(max_size / float(1000))), 1)
statistics = get_statistics(
progresses, use_median, normalize_error,
)
statistics = process_statistics(
statistics,
smooth_curve,
clip_plot_value,
window_size,
)
to_plot.append(
AttrDict(
legend=legend_post_processor(legend),
plot_key=plot_key,
**statistics
)
)
if len(to_plot) > 0 and len(data) > 0:
to_plot[-1]["footnote"] = "%s; e.g. %s" % (
kv_string_best_regret,
data[0].params.get("exp_name", "NA"))
else:
to_plot[-1]["footnote"] = ""
else:
progresses = [
exp.progress.get(plot_key, np.array([np.nan])) for exp
in filtered_data
]
sizes = list(map(len, progresses))
# more intelligent:
max_size = max(sizes)
progresses = [
np.concatenate(
[ps, np.ones(max_size - len(ps)) * np.nan]) for ps
in progresses]
window_size = np.maximum(
int(np.round(max_size / float(100))),
1,
)
statistics = get_statistics(
progresses, use_median, normalize_error,
)
statistics = process_statistics(
statistics,
smooth_curve,
clip_plot_value,
window_size,
)
to_plot.append(
AttrDict(
legend=legend_post_processor(group_legend),
plot_key=plot_key,
**statistics
)
)
if len(to_plot) > 0:
list_of_list_of_plot_dicts.append(to_plot)
if len(list_of_list_of_plot_dicts) > 0 and not gen_eps:
fig_title = split_title
plots.append(make_plot(
list_of_list_of_plot_dicts,
use_median=use_median, title=fig_title,
plot_width=plot_width, plot_height=plot_height
))
if gen_eps:
make_plot_eps(to_plot, use_median=use_median, counter=counter)
counter += 1
return "\n".join(plots)
def shorten_key(key):
"""
Convert a dot-map string like "foo.bar.baz" into "f.b.baz"
"""
*heads, tail = key.split(".")
new_key_builder = []
for subkey in heads:
if len(subkey) > 0:
new_key_builder.append(subkey[0])
new_key_builder.append(tail)
return ".".join(new_key_builder)
def get_selector_score(key, selector, use_median, best_based_on_final):
"""
:param key: Thing to measure (e.g. Average Returns, Loss, etc.)
:param selector: Selector instance
:param use_median: Use the median? Else use the mean
:param best_based_on_final: Only look at the final value? Else use all
values.
:return: A single number that gives the score of `key` inside `selector`
"""
data = selector.extract()
if best_based_on_final:
values = [
exp.progress.get(key, np.array([np.nan]))[-1]
for exp in data
]
else:
values = np.concatenate([
exp.progress.get(key, np.array([np.nan]))
for exp in data
] or [[np.nan]])
if len(values) == 0 or not np.isfinite(values).all():
return np.nan
if use_median:
return np.nanpercentile(values, q=50, axis=0)
else:
return np.nanmean(values)
def get_statistics(progresses, use_median, normalize_errors):
"""
Get some dictionary of statistics (e.g. the median, mean).
:param progresses:
:param use_median:
:param normalize_errors:
:return:
"""
if use_median:
return dict(
percentile25=np.nanpercentile(progresses, q=25, axis=0),
percentile50=np.nanpercentile(progresses, q=50, axis=0),
percentile75=np.nanpercentile(progresses, q=75, axis=0),
)
else:
stds = np.nanstd(progresses, axis=0)
if normalize_errors:
stds /= np.sqrt(np.sum((1. - np.isnan(progresses)), axis=0))
return dict(
means=np.nanmean(progresses, axis=0),
stds=stds,
)
def process_statistics(
statistics,
smooth_curve,
clip_plot_value,
window_size
):
"""
Smoothen and clip time-series data.
"""
clean_statistics = {}
for k, v in statistics.items():
clean_statistics[k] = v
if smooth_curve:
clean_statistics[k] = sliding_mean(v, window=window_size)
if clip_plot_value is not None:
clean_statistics[k] = np.clip(
clean_statistics[k],
-clip_plot_value,
clip_plot_value,
)
return clean_statistics
def get_possible_values(distinct_params, key):
return [vs for k, vs in distinct_params if k == key][0]
def split_by_key(selector, key, distinct_params):
"""
Return a list of selectors based on this selector.
Each selector represents one distinct value of `key`.
"""
values = get_possible_values(distinct_params, key)
return [selector.where(key, v) for v in values]
def split_by_keys(base_selector, keys, distinct_params):
"""
Return a list of selectors based on the base_selector.
Each selector represents one distinct set of values for each key in `keys`.
:param base_selector:
:param keys:
:param distinct_params:
:return:
"""
list_of_key_and_unique_value = [
[
(key, v)
for v in get_possible_values(distinct_params, key)
]
for key in keys
]
"""
elements of list_of_key_and_unique_value should look like:
- [(color, red), (color, blue), (color, green), ...]
- [(season, spring), (season, summer), (season, fall), ...]
We now take the cartesian product so that we get all the
combinations, like:
- [(color, red), (season, spring)]
- [(color, blue), (season, spring)]
- ...
"""
selectors = []
descriptions = []
for key_and_value_list in itertools.product(
*list_of_key_and_unique_value
):
selector = None
keys = []
for key, value in key_and_value_list:
keys.append(key)
if selector is None:
selector = base_selector.where(key, value)
else:
selector = selector.where(key, value)
selectors.append(selector)
descriptions.append(", ".join([
"{0}={1}".format(
shorten_key(key),
value,
)
for key, value in key_and_value_list
]))
return selectors, descriptions
def parse_float_arg(args, key):
x = args.get(key, "")
try:
return float(x)
except Exception:
return None
@app.route("/plot_div")
def plot_div():
args = flask.request.args
plot_keys_json = args.get("plot_keys")
plot_keys = json.loads(plot_keys_json)
split_keys_json = args.get("split_keys", "[]")
split_keys = json.loads(split_keys_json)
group_keys_json = args.get("group_keys", "[]")
group_keys = json.loads(group_keys_json)
best_filter_key = args.get("best_filter_key", "")
filters_json = args.get("filters", "{}")
filters = json.loads(filters_json)
exclusions_json = args.get("exclusions", "{}")
exclusions = json.loads(exclusions_json)
if len(best_filter_key) == 0:
best_filter_key = None
use_median = args.get("use_median", "") == 'True'
gen_eps = args.get("eps", "") == 'True'
only_show_best = args.get("only_show_best", "") == 'True'
best_based_on_final = args.get("best_based_on_final", "") == 'True'
only_show_best_sofar = args.get("only_show_best_sofar", "") == 'True'
best_is_lowest = args.get("best_is_lowest", "") == 'True'
normalize_error = args.get("normalize_error", "") == 'True'
filter_nan = args.get("filter_nan", "") == 'True'
smooth_curve = args.get("smooth_curve", "") == 'True'
clip_plot_value = parse_float_arg(args, "clip_plot_value")
plot_width = parse_float_arg(args, "plot_width")
plot_height = parse_float_arg(args, "plot_height")
custom_filter = args.get("custom_filter", None)
custom_series_splitter = args.get("custom_series_splitter", None)
if custom_filter is not None and len(custom_filter.strip()) > 0:
custom_filter = safer_eval(custom_filter)
else:
custom_filter = None
legend_post_processor = args.get("legend_post_processor", None)
if legend_post_processor is not None and len(
legend_post_processor.strip()) > 0:
legend_post_processor = safer_eval(legend_post_processor)
else:
legend_post_processor = None
if custom_series_splitter is not None and len(
custom_series_splitter.strip()) > 0:
custom_series_splitter = safer_eval(custom_series_splitter)
else:
custom_series_splitter = None
plot_div = get_plot_instruction(
plot_keys=plot_keys,
split_keys=split_keys,
filter_nan=filter_nan,
group_keys=group_keys,
best_filter_key=best_filter_key,
filters=filters,
exclusions=exclusions,
use_median=use_median,
gen_eps=gen_eps,
only_show_best=only_show_best,
best_based_on_final=best_based_on_final,
only_show_best_sofar=only_show_best_sofar,
best_is_lowest=best_is_lowest,
clip_plot_value=clip_plot_value,
plot_width=plot_width,
plot_height=plot_height,
smooth_curve=smooth_curve,
custom_filter=custom_filter,
legend_post_processor=legend_post_processor,
normalize_error=normalize_error,
custom_series_splitter=custom_series_splitter,
)
return plot_div
def safer_eval(some_string):
"""
Not full-proof, but taking advice from:
https://nedbatchelder.com/blog/201206/eval_really_is_dangerous.html
"""
if "__" in some_string or "import" in some_string:
raise Exception("string to eval looks suspicious")
return eval(some_string, {'__builtins__': {}})
@app.route("/")
def index():
if "AverageReturn" in plottable_keys:
plot_keys = ["AverageReturn"]
elif len(plottable_keys) > 0:
plot_keys = plottable_keys[0:1]
else:
plot_keys = None
plot_div = get_plot_instruction(plot_keys=plot_keys)
return flask.render_template(
"main.html",
plot_div=plot_div,
plot_keys=plot_keys,
group_keys=[],
plottable_keys=plottable_keys,
distinct_param_keys=[str(k) for k, v in distinct_params],
distinct_params=dict([(str(k), list(map(str, v)))
for k, v in distinct_params]),
)
def reload_data(data_filename):
global exps_data
global plottable_keys
global distinct_params
exps_data = core.load_exps_data(
args.data_paths,
data_filename,
args.disable_variant,
)
plottable_keys = list(
set(flatten(list(exp.progress.keys()) for exp in exps_data)))
plottable_keys = sorted([k for k in plottable_keys if k is not None])
distinct_params = sorted(core.extract_distinct_params(exps_data))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("data_paths", type=str, nargs='*')
parser.add_argument("--prefix", type=str, nargs='?', default="???")
parser.add_argument("--debug", action="store_true", default=False)
parser.add_argument("--port", type=int, default=5000)
parser.add_argument("--disable-variant", default=False, action='store_true')
parser.add_argument("--dname", default='progress.csv', help='name of data file')
args = parser.parse_args(sys.argv[1:])
# load all folders following a prefix
if args.prefix != "???":
args.data_paths = []
dirname = os.path.dirname(args.prefix)
subdirprefix = os.path.basename(args.prefix)
for subdirname in os.listdir(dirname):
path = os.path.join(dirname, subdirname)
if os.path.isdir(path) and (subdirprefix in subdirname):
args.data_paths.append(path)
print("Importing data from {path}...".format(path=args.data_paths))
reload_data(args.dname)
port = args.port
try:
print("View http://localhost:%d in your browser" % port)
app.run(host='0.0.0.0', port=port, debug=args.debug)
except OSError as e:
if e.strerror == 'Address already in use':
print("Port {} is busy. Try specifying a different port with ("
"e.g.) --port=5001".format(port))
author: [email protected]
/base_kivy_app/graphics.py | healthonrails/base_kivy_app | MIT (permissive) | Python | UTF-8 | 35,652 bytes
'''Graphics
============
'''
from os.path import join, dirname
import math
from time import perf_counter
from functools import partial
from inspect import isclass
from math import pow, fabs
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.properties import (
NumericProperty, ReferenceListProperty, ObjectProperty,
ListProperty, StringProperty, BooleanProperty, DictProperty, AliasProperty,
OptionProperty)
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.scatter import Scatter
from kivy.uix.spinner import Spinner, SpinnerOption
from kivy.graphics.texture import Texture
from kivy.graphics import Rectangle, BindTexture
from kivy.graphics.transformation import Matrix
from kivy.graphics.fbo import Fbo
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.core.window import Window
from kivy.metrics import dp
from kivy.uix.behaviors.button import ButtonBehavior
from kivy.uix.behaviors.focus import FocusBehavior
from kivy.animation import Sequence, Animation
from kivy.factory import Factory
from kivy.compat import string_types
from kivy.uix.dropdown import DropDown
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from base_kivy_app.utils import pretty_time
__all__ = (
'EventFocusBehavior', 'BufferImage', 'ErrorIndicatorBase', 'TimeLineSlice',
'TimeLine', 'AutoSizedSpinner', 'EmptyDropDown', 'HighightButtonBehavior')
Builder.load_file(join(dirname(__file__), 'graphics.kv'))
class AutoSizedSpinnerBehavior(object):
'''Spinner that exposes :attr:`minimum_size`, which is the size
required to display the texture of the largest item in the spinner.
'''
minimum_size = ObjectProperty((0, 0))
'''A 2-tuple containing the texture width and height of the spinner item
with the largest texture. Can be used to set the spinner size to ensure it
will be big enough to display nicely the largest item.
'''
def __init__(self, **kwargs):
cls = kwargs.pop('option_cls', self.option_cls)
if isinstance(cls, string_types):
cls = Factory.get(cls)
self.option_cls = partial(self._decorate_class, cls)
def decorate_cls(*largs):
cls = self.option_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
if not isclass(cls) or not issubclass(cls, Widget):
return
self.option_cls = partial(self._decorate_class, cls)
self.fbind('option_cls', decorate_cls)
self.fbind('texture_size', self._update_min_size)
self.fbind('padding', self._update_min_size)
super(AutoSizedSpinnerBehavior, self).__init__(**kwargs)
self._update_min_size()
def _decorate_class(self, cls, *l, **kw):
wid = cls(*l, **kw)
wid.fbind('texture_size', self._update_min_size)
self._update_min_size()
return wid
def _update_min_size(self, *largs):
if not self._dropdown or not self._dropdown.container:
widgets = [self]
else:
widgets = self._dropdown.container.children + [self]
w = max((c.texture_size[0] for c in widgets))
h = max((c.texture_size[1] for c in widgets))
self.minimum_size = w + 2 * self.padding_x, h + 2 * self.padding_y
class EmptyDropDown(DropDown):
def __init__(self, **kwargs):
super(EmptyDropDown, self).__init__(container=None, **kwargs)
class FollowingLabel(Label):
attached_widget = None
def show_label(self, widget):
self.attached_widget = widget
Window.add_widget(self)
widget.fbind('center', self._reposition)
self.fbind('size', self._reposition)
def hide_label(self):
Window.remove_widget(self)
self.attached_widget.funbind('center', self._reposition)
self.funbind('size', self._reposition)
def _reposition(self, *largs):
# calculate the coordinate of the attached widget in the window
# coordinate system
win = Window
widget = self.attached_widget
wx, wy = widget.to_window(*widget.pos)
_, wtop = widget.to_window(widget.right, widget.top)
# ensure the dropdown list doesn't get out on the X axis, with a
# preference to 0 in case the list is too wide.
x = wx
if x + self.width > win.width:
x = win.width - self.width
if x < 0:
x = 0
self.x = x
# determine if we display the dropdown upper or lower to the widget
height = self.height
h_bottom = wy - height
h_top = win.height - (wtop + height)
if h_bottom > 0:
self.top = wy
elif h_top > 0:
self.y = wtop
else:
if h_top < h_bottom:
self.top = self.height = wy
else:
self.y = wtop
Builder.load_string('''
<FollowingLabel>:
size_hint: None, None
size: self.texture_size
padding: '6dp', '6dp'
color: 0, 0, 0, 1
canvas.before:
Color:
rgba: 1, 1, 1, 1
Rectangle:
size: self.size
pos: self.pos
<HighightButtonBehavior>:
canvas.after:
Color:
a: .3 if self.hovering else 0
Rectangle:
pos: self.pos
size: self.size
''')
class HighightButtonBehavior(object):
show_hover = BooleanProperty(True)
hover_text = StringProperty('')
hovering = BooleanProperty(False)
attached_widget = None
tracked_widgets = []
label = None
def __init__(self, **kwargs):
super(HighightButtonBehavior, self).__init__(**kwargs)
if self.show_hover:
self.tracked_widgets.append(self.proxy_ref)
def on_show_hover(self, *largs):
if self.show_hover:
self.tracked_widgets.append(self.proxy_ref)
else:
if self.hovering:
self.detach_widget()
self.tracked_widgets.remove(self.proxy_ref)
def on_hover_text(self, *largs):
if self.hovering and self.label:
self.label.text = self.hover_text
@staticmethod
def init_class():
Window.fbind('mouse_pos', HighightButtonBehavior.track_mouse)
HighightButtonBehavior.label = FollowingLabel(markup=True)
@staticmethod
def uninit_class():
Window.funbind('mouse_pos', HighightButtonBehavior.track_mouse)
HighightButtonBehavior.label = None
HighightButtonBehavior.attached_widget = None
del HighightButtonBehavior.tracked_widgets[:]
def attach_widget(self):
self.hovering = True
if self.hover_text and self.label is not None:
self.label.show_label(self)
self.label.text = self.hover_text
HighightButtonBehavior.attached_widget = self
def detach_widget(self):
self.hovering = False
HighightButtonBehavior.attached_widget = None
if self.hover_text and self.label is not None:
self.label.hide_label()
@staticmethod
def track_mouse(instance, pos):
widget = HighightButtonBehavior.attached_widget
if widget:
if widget.collide_point(*widget.to_widget(*pos)):
return
else:
widget.detach_widget()
for widget in HighightButtonBehavior.tracked_widgets:
try:
if widget.collide_point(*widget.to_widget(*pos)):
widget.attach_widget()
break
except ReferenceError:
pass
class SpinnerBehavior(AutoSizedSpinnerBehavior):
values = ListProperty()
text_autoupdate = BooleanProperty(False)
option_cls = ObjectProperty(SpinnerOption)
dropdown_cls = ObjectProperty(DropDown)
is_open = BooleanProperty(False)
sync_height = BooleanProperty(False)
def __init__(self, **kwargs):
self._dropdown = None
super(SpinnerBehavior, self).__init__(**kwargs)
fbind = self.fbind
build_dropdown = self._build_dropdown
fbind('on_release', self._toggle_dropdown)
fbind('dropdown_cls', build_dropdown)
fbind('option_cls', build_dropdown)
fbind('values', self._update_dropdown)
fbind('size', self._update_dropdown_size)
fbind('text_autoupdate', self._update_dropdown)
build_dropdown()
def _build_dropdown(self, *largs):
if self._dropdown:
self._dropdown.unbind(on_select=self._on_dropdown_select)
self._dropdown.unbind(on_dismiss=self._close_dropdown)
self._dropdown.dismiss()
self._dropdown = None
cls = self.dropdown_cls
if isinstance(cls, string_types):
cls = Factory.get(cls)
self._dropdown = cls()
self._dropdown.bind(on_select=self._on_dropdown_select)
self._dropdown.bind(on_dismiss=self._close_dropdown)
self._update_dropdown()
def _update_dropdown_size(self, *largs):
if not self.sync_height:
return
dp = self._dropdown
if not dp:
return
container = dp.container
if not container:
return
h = self.height
for item in container.children[:]:
item.height = h
def _update_dropdown(self, *largs):
dp = self._dropdown
cls = self.option_cls
values = self.values
text_autoupdate = self.text_autoupdate
if isinstance(cls, string_types):
cls = Factory.get(cls)
dp.clear_widgets()
for value in values:
item = cls(text=value)
item.height = self.height if self.sync_height else item.height
item.bind(on_release=lambda option: dp.select(option.text))
dp.add_widget(item)
if text_autoupdate:
if values:
if not self.text or self.text not in values:
self.text = values[0]
else:
self.text = ''
def _toggle_dropdown(self, *largs):
if self.values:
self.is_open = not self.is_open
def _close_dropdown(self, *largs):
self.is_open = False
def _on_dropdown_select(self, instance, data, *largs):
self.text = data
self.is_open = False
def on_is_open(self, instance, value):
if value:
self._dropdown.open(self)
else:
if self._dropdown.attach_to:
self._dropdown.dismiss()
class AutoSizedSpinner(AutoSizedSpinnerBehavior, Spinner):
pass
class EventFocusBehavior(FocusBehavior):
''':class:`~kivy.uix.behaviors.focus.FocusBehavior` based class which
    converts keyboard events listed in :attr:`keys` into an ``on_key_press`` or
``on_key_release`` event.
:Events:
`on_key_press`:
Triggered when a key that is in :attr:`keys` is pressed.
`on_key_release`:
Triggered when a key that is in :attr:`keys` is released.
'''
__events__ = ('on_key_press', 'on_key_release')
keys = ListProperty(['spacebar', 'escape', 'enter'])
'''A list of strings that are potential keyboard keys, which trigger
key press or key release events.
Defaults to `['spacebar', 'escape', 'enter']`.
'''
def keyboard_on_key_down(self, window, keycode, text, modifiers):
if super(EventFocusBehavior, self).keyboard_on_key_down(
window, keycode, text, modifiers):
return True
if keycode[1] in self.keys:
return self.dispatch('on_key_press', keycode[1])
def keyboard_on_key_up(self, window, keycode):
if super(EventFocusBehavior, self).keyboard_on_key_up(window, keycode):
return True
if keycode[1] in self.keys:
return self.dispatch('on_key_release', keycode[1])
def on_key_press(self, key):
pass
def on_key_release(self, key):
pass
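# A minimal usage sketch (hypothetical subclass; not part of the original module):
# class ConfirmBar(EventFocusBehavior, BoxLayout):
#     def on_key_press(self, key):
#         if key == 'enter':
#             print('accepted')
#         elif key == 'escape':
#             print('cancelled')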
class BufferImage(Scatter):
'''Class that displays an image and allows its manipulation using touch.
It receives an ffpyplayer :py:class:`~ffpyplayer.pic.Image` object.
'''
scale_to_image = BooleanProperty(True)
flip = BooleanProperty(False)
_iw = NumericProperty(0.)
'''The width of the input image. '''
_ih = NumericProperty(0.)
'''The height of the input image. '''
available_size = ObjectProperty(None, allownone=True)
'''The size that the widget has available for drawing.
'''
_last_w = 0
'''The width of the screen region available to display the image. Can be
used to determine if the screen size changed and we need to output a
different sized image.
'''
_last_h = 0
    '''The height of the screen region available to display the image. '''
_last_rotation = 0
image_size = ObjectProperty((0, 0))
'''The size of the last image.
'''
_fmt = ''
'''The input format of the last image passed in, if the format is
    supported. E.g. rgb24, yuv420p, etc. Otherwise, it's the format into which
    the unsupported image is converted.
'''
_sw_src_fmt = ''
'''The input format of the last image passed in. '''
_swscale = None
'''The SWScale object that converts the image into a supported format. '''
img = None
'''Holds the last :class:`~ffpyplayer.pic.Image` passed in. '''
texture_size = ObjectProperty((0, 0))
'''A tuple with the size of the last :class:`~ffpyplayer.pic.Image`
that was passed in.
'''
img_texture = ObjectProperty(None)
'''The texture into which the images are blitted. Defaults to None. '''
color = ListProperty([1, 1, 1, 1])
'''The color in which to display the image. '''
_kivy_ofmt = ''
'''Kivy's color format of the image passed in. '''
_tex_y = None
''' The y texture into which the y plane of the images are blitted when
yuv420p.
'''
_tex_u = None
''' The u texture into which the u plane of the images are blitted when
yuv420p.
'''
_tex_v = None
''' The v texture into which the v plane of the images are blitted when
yuv420p.
'''
_fbo = None
''' The Fbo used when blitting yuv420p images. '''
_YUV_RGB_FS = '''
$HEADER$
uniform sampler2D tex_y;
uniform sampler2D tex_u;
uniform sampler2D tex_v;
void main(void) {
float y = texture2D(tex_y, tex_coord0).r;
float u = texture2D(tex_u, tex_coord0).r - 0.5;
float v = texture2D(tex_v, tex_coord0).r - 0.5;
float r = y + 1.402 * v;
float g = y - 0.344 * u - 0.714 * v;
float b = y + 1.772 * u;
gl_FragColor = vec4(r, g, b, 1.0);
}
'''
def on_flip(self, *largs):
self.update_img(self.img, True)
def update_img(self, img, force=False):
''' Updates the screen with a new image.
:Parameters:
`img`: :class:`~ffpyplayer.pic.Image` instance
The image to be displayed.
'''
from ffpyplayer.tools import get_best_pix_fmt
from ffpyplayer.pic import SWScale
if img is None:
return
img_fmt = img.get_pixel_format()
self.image_size = img_w, img_h = img.get_size()
update = force
if self._iw != img_w or self._ih != img_h:
update = True
if img_fmt not in ('yuv420p', 'rgba', 'rgb24', 'gray', 'bgr24', 'bgra'):
swscale = self._swscale
if img_fmt != self._sw_src_fmt or swscale is None or update:
ofmt = get_best_pix_fmt(
img_fmt, (
'yuv420p', 'rgba', 'rgb24', 'gray', 'bgr24', 'bgra'))
self._swscale = swscale = SWScale(
iw=img_w, ih=img_h, ifmt=img_fmt, ow=0, oh=0, ofmt=ofmt)
self._sw_src_fmt = img_fmt
img = swscale.scale(img)
img_fmt = img.get_pixel_format()
w, h = self.available_size or self.size
if (not w) or not h:
self.img = img
return
if self._fmt != img_fmt:
self._fmt = img_fmt
self._kivy_ofmt = {
'yuv420p': 'yuv420p', 'rgba': 'rgba', 'rgb24': 'rgb',
'gray': 'luminance', 'bgr24': 'bgr', 'bgra': 'bgra'}[img_fmt]
update = True
if update or w != self._last_w or h != self._last_h or \
self.rotation != self._last_rotation:
if self.scale_to_image:
rotation = self.rotation
rot = self.rotation * math.pi / 180
rot_w = abs(img_w * math.cos(rot)) + abs(img_h * math.sin(rot))
rot_h = abs(img_h * math.cos(rot)) + abs(img_w * math.sin(rot))
scalew, scaleh = w / rot_w, h / rot_h
scale = min(min(scalew, scaleh), 1)
self.transform = Matrix()
self.rotation = rotation
self.apply_transform(Matrix().scale(scale, scale, 1),
post_multiply=True)
self.pos = 0, 0
self._iw, self._ih = img_w, img_h
self._last_h = h
self._last_w = w
self._last_rotation = self.rotation
self.img = img
kivy_ofmt = self._kivy_ofmt
if update:
self.canvas.remove_group(str(self) + 'image_display')
if kivy_ofmt == 'yuv420p':
w2 = int(img_w / 2)
h2 = int(img_h / 2)
self._tex_y = Texture.create(size=(img_w, img_h),
colorfmt='luminance')
self._tex_u = Texture.create(size=(w2, h2),
colorfmt='luminance')
self._tex_v = Texture.create(size=(w2, h2),
colorfmt='luminance')
with self.canvas:
self._fbo = fbo = Fbo(size=(img_w, img_h),
group=str(self) + 'image_display')
with fbo:
BindTexture(texture=self._tex_u, index=1)
BindTexture(texture=self._tex_v, index=2)
Rectangle(size=fbo.size, texture=self._tex_y)
fbo.shader.fs = BufferImage._YUV_RGB_FS
fbo['tex_y'] = 0
fbo['tex_u'] = 1
fbo['tex_v'] = 2
tex = self.img_texture = fbo.texture
fbo.add_reload_observer(self.reload_buffer)
else:
tex = self.img_texture = Texture.create(
size=(img_w, img_h), colorfmt=kivy_ofmt)
tex.add_reload_observer(self.reload_buffer)
tex.flip_vertical()
if self.flip:
tex.flip_horizontal()
self.texture_size = img_w, img_h
if kivy_ofmt == 'yuv420p':
dy, du, dv, _ = img.to_memoryview()
self._tex_y.blit_buffer(dy, colorfmt='luminance')
self._tex_u.blit_buffer(du, colorfmt='luminance')
self._tex_v.blit_buffer(dv, colorfmt='luminance')
self._fbo.ask_update()
self._fbo.draw()
else:
self.img_texture.blit_buffer(img.to_memoryview()[0],
colorfmt=kivy_ofmt)
self.canvas.ask_update()
def reload_buffer(self, *args):
        ''' Reloads the last displayed image. It should be called whenever the
        screen size changes or the last image needs to be recalculated.
'''
if self.img is not None:
self.update_img(self.img)
def rotate_right_reposition(self):
rotation = self.rotation - 90
factor = abs(int(round(rotation / 90))) % 4
self.rotation = math.copysign(factor * 90, rotation)
self.reload_buffer()
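# A hedged usage sketch feeding one frame into the widget (sizes, pixel format
# and the zero-filled buffer are illustrative assumptions):
# from ffpyplayer.pic import Image as FFImage
# w, h = 64, 48
# frame = FFImage(plane_buffers=[bytes(w * h * 3)], pix_fmt='rgb24', size=(w, h))
# viewer = BufferImage(available_size=(640, 480))
# viewer.update_img(frame)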
class ErrorIndicatorBehavior(ButtonBehavior):
'''A Button based class that visualizes and notifies on the current error
status.
When pressed, it stops the notification and displays in a popup the list
of errors/warnings/infos.
    Errors are added to the log with :meth:`add_item`.
'''
_container = None
_level = StringProperty('ok')
_alpha = NumericProperty(1.)
_anim = None
levels = {'error': 0, 'warning': 1, 'info': 2}
icon_names = {}
count = NumericProperty(0)
__events__ = ('on_log_event', )
def __init__(self, **kw):
super(ErrorIndicatorBehavior, self).__init__(**kw)
a = self._anim = Sequence(
Animation(t='in_bounce', _alpha=1.),
Animation(t='out_bounce', _alpha=0))
a.repeat = True
def add_item(self, text, level='error'):
'''Adds a log item to the log. Upon addition, the button will notify
with an animation of the item.
:Parameters:
`text`: str
The text of the item.
`level`: str
Can be one of `error`, `warning`, or `info` indicating
the importance of the item. Defaults to `error`.
'''
levels = self.levels
if level not in levels:
raise ValueError('"{}" is not a valid level within "{}"'.
format(level, levels.keys()))
self.count += 1
if self._level == 'ok':
if levels[level] < levels['info']:
self._level = level
self._anim.start(self)
elif levels[level] < levels[self._level]:
self._level = level
self._container.data.append(
{'text': text, 'icon_name': self.icon_names.get(level, level)})
self.dispatch('on_log_event', self, text, level)
def on_log_event(self, *largs):
pass
class ErrorIndicatorBase(ErrorIndicatorBehavior, Widget):
pass
class TimeLineSlice(Widget):
'''A representation of a time slice of :class:`TimeLine`.
'''
duration = NumericProperty(0)
'''The duration of the slice.
'''
elapsed_t = NumericProperty(0)
'''The amount of time that has elapsed since the start of this slice.
Can be larger than :attr:`duration`, but visually it gets clipped to
:attr:`duration`.
'''
_scale = NumericProperty(0)
color = ObjectProperty(None, allownone=True)
'''If not None, it's a list of size 2 indicating the color to use for when
the slice is not yet done and when it's done, respectively. When not None,
it overwrites the values provided with :attr:`TimeLine.color_odd` and
    :attr:`TimeLine.color_even`.
'''
_color = ListProperty([(1, 1, 1, 1), (1, 1, 1, 1)])
name = StringProperty('')
'''The name of the slice.
'''
text = StringProperty('')
'''If not empty, rather than displaying :attr:`name` when this slice is
active, it'll display this :attr:`text`.
'''
class TimeLine(BoxLayout):
'''A widget that displays an elapsing time line. It has named time slices
indicating e.g. timed stages and the time line progresses through them.
Slices are added/removed with :meth:`add_slice`, :meth:`remove_slice`, and
:meth:`clear_slices`. :meth:`smear_slices` is used to smear the width
of the slices so that they are non-linearly proportional to the provided
duration of each slice.
To move from one slice to another, :meth:`set_active_slice` must be called.
    It marks all the slices preceding the new active slice as done. Slices do
    not finish automatically without this method being called.
'''
slices = ListProperty([])
'''The list of :class:`TimeLineSlice` visualizing all the slices.
'''
slice_names = ListProperty([])
'''The given name corresponding to the slices in :attr:`slices`. They
should be unique.
'''
current_slice = NumericProperty(None, allownone=True)
'''The index in :attr:`slices` that is the current slice.
'''
timer = StringProperty('')
'''A string version of the amount of time elapsed within the current slice.
It gets reset when :meth:`set_active_slice` is called.
'''
text = StringProperty('')
'''The name of the current slice displayed in the status field. '''
color_odd = ListProperty([(0, .7, .2, 1), (.5, .5, 0, 1)])
'''A list of size 2 indicating the color to use when the slice is not yet
done and when it's done for odd slices, respectively. Each item is a 4
tuple indicating the rgba value (0-1) to use.
'''
color_even = ListProperty(
[(0, .2, .7, 1), (135 / 255., 206 / 255., 250 / 255., 1)])
'''A list of size 2 indicating the color to use when the slice is not yet
done and when it's done for even slices, respectively. Each item is a 4
tuple indicating the rgba value (0-1) to use.
'''
_start_t = perf_counter()
def __init__(self, **kwargs):
super(TimeLine, self).__init__(**kwargs)
Clock.schedule_interval(self._update_clock, .15)
def _update_clock(self, dt):
elapsed = perf_counter() - self._start_t
self.timer = pretty_time(elapsed)
if self.slices and self.current_slice is not None:
self.slices[self.current_slice].elapsed_t = elapsed
def set_active_slice(self, name, after=None):
'''Sets the slice that is the active slice. All the slices preceding
this slice will be marked as done and the timer will restart.
:Parameters:
`name`: str
The name of the slice to set as the current slice. It can be
the name of a non-existing slice.
`after`: str
If ``name`` is a non-existing slice, if ``after`` is None,
then all the slices preceding, and including the current slice
will be marked as done. Otherwise, all the slices preceding
and including the named slice will be marked as done.
'''
try:
idx = self.slice_names.index(name)
for s in self.slices[:idx]:
s.elapsed_t = max(s.duration, 10000)
for s in self.slices[idx:]:
s.elapsed_t = 0.
self.current_slice = idx
except ValueError:
if after is not None:
idx = self.slice_names.index(after)
for s in self.slices[:idx + 1]:
s.elapsed_t = max(s.duration, 10000)
for s in self.slices[idx + 1:]:
s.elapsed_t = 0.
elif self.current_slice is not None:
for s in self.slices[:self.current_slice + 1]:
s.elapsed_t = max(s.duration, 10000)
self.current_slice = None
self.text = name
self._start_t = perf_counter()
def clear_slices(self):
'''Removes all the slices and clears the time line.
'''
for ch in self.box.children[:]:
self.box.remove_widget(ch)
self.current_slice = None
self.slice_names = []
self.slices = []
self._start_t = perf_counter()
def update_slice_attrs(self, current_name, **kwargs):
'''Called to update the attributes of the :class:`TimeLineSlice`
instance associated with the name such as
:attr:`TimeLineSlice.duration` etc. Can be used to even rename the
slice.
:Parameters:
`name`: str
The name of the slice to update.
`**kwargs`: keyword args
The names and values of the slice to change.
'''
s = self.slices[self.slice_names.index(current_name)]
for key, val in kwargs.items():
setattr(s, key, val)
self._update_attrs()
def _update_attrs(self):
widgets = list(reversed(self.box.children))
self.slice_names = [widget.name for widget in widgets]
for i, wid in enumerate(widgets):
wid._color = self.color_odd if i % 2 else self.color_even
def add_slice(
self, name, before=None, duration=0, size_hint_x=None, **kwargs):
'''Adds a new slice to the timeline.
:Parameters:
`name`: str
The unique name of the new slice to create.
`before`: str
If not None, the name of the slice before which to create the
new slice. Otherwise, the default, it's added at the end.
`duration`: float, int
The estimated duration of the slice. Defaults to 0. A slice
of duration 0 is allowed.
`size_hint_x`: float
The width size_hint of the slice display. If None, the default,
the duration is used as the size hint, otherwise the provided
value is used. Since Kivy normalizes the size hints to 1.0, by
default the duration is used to scale the displayed width of
the slices to their durations.
'''
if 'text' not in kwargs:
kwargs['text'] = name
s = TimeLineSlice(
duration=duration, name=name,
size_hint_x=size_hint_x if size_hint_x is not None else duration,
**kwargs)
if before is not None:
i = self.slice_names.index(before)
old_len = len(self.slices)
            self.slices.insert(i, s)  # list.insert expects the index first
i = old_len - i
else:
self.slices.append(s)
i = 0
self.box.add_widget(s, index=i)
self._update_attrs()
def remove_slice(self, name):
'''Removes the named slice.
:Parameters:
`name`: str
The name of the slice to remove.
'''
s = self.slices.pop(self.slice_names.index(name))
self.box.remove_widget(s)
self._update_attrs()
def smear_slices(self, exponent=3):
        '''Smears the width of the slices in a non-linear manner so that the
        width of each slice is less directly proportional to the duration of
        the slice. It is useful to prevent some slices being huge and others
        tiny.
Overall, the algorithm normalizes exponentiated durations to their mean
exponentiated value.
:Parameters:
`exponent`: float, int
The exponent to use when smearing the slices. Defaults to 3.
'''
widgets = self.box.children
vals = [w.duration for w in widgets if w.duration]
mn, mx = min(vals), max(vals)
center = (mn + mx) / 2.
a = pow(mx - center, exponent)
offset = abs(pow(mn - center, exponent) / a)
def f(x):
return max((2 * pow(x - center, exponent) / a) + offset, offset)
for w in widgets:
w.size_hint_x = f(w.duration)
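        # Worked example of the smearing above (illustrative numbers): with
        # durations 1, 5 and 10 and the default exponent 3, center = 5.5,
        # a = 4.5 ** 3 = 91.125 and offset = 1, so f maps the durations to
        # size hints of 1, 1 and 3; a 10x spread in duration collapses to 3x.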
class FlatTextInput(TextInput):
pass
class TimeSliceSelection(Widget):
low_handle = NumericProperty(0)
high_handle = NumericProperty(1)
min = NumericProperty(0)
max = NumericProperty(1)
low_val = NumericProperty(0)
high_val = NumericProperty(1)
_working = False
def __init__(self, **kwargs):
super(TimeSliceSelection, self).__init__(**kwargs)
self.fbind('min', self._update_handles)
self.fbind('max', self._update_handles)
self.fbind('width', self._update_handles)
self.fbind('x', self._update_handles)
self.fbind('low_val', self._update_handles)
self.fbind('high_val', self._update_handles)
def _update_handles(self, *largs):
if self._working:
return
lowval = self.low_val
highval = self.high_val
mn = self.min
mx = self.max
if lowval < mn:
self.low_val = mn
return
if highval > mx:
self.high_val = mx
return
if lowval > highval:
self._working = True
self.low_val = highval
self._working = False
self.high_val = lowval
return
self.low_handle = self.to_size(lowval - mn) + self.x
self.high_handle = self.to_size(highval - mn) + self.x
def to_size(self, value):
'''value is the state value. returns in size.
'''
diff = float(self.max - self.min)
w = self.width
if not diff or not w:
return 0
return value / diff * w
def to_state(self, value):
'''value is the size value. returns in state.
'''
diff = float(self.max - self.min)
w = float(self.width)
if not diff or not w:
return 0
return value / w * diff
def on_touch_down(self, touch):
if super(TimeSliceSelection, self).on_touch_down(touch):
return True
if not self.collide_point(*touch.pos):
return False
tol = dp(2)
if self.low_handle - tol <= touch.x <= self.high_handle + tol:
if self.low_handle + tol <= touch.x <= self.high_handle - tol:
touch.ud['{0}.{1}'.format('timeslice', self.uid)] = 'center'
else:
touch.ud['{0}.{1}'.format('timeslice', self.uid)] = 'side'
return True
return False
def on_touch_move(self, touch):
if super(TimeSliceSelection, self).on_touch_move(touch):
return True
drag_type = touch.ud.get('{0}.{1}'.format('timeslice', self.uid))
if drag_type not in ('center', 'side'):
return False
dx = touch.dx
start = touch.x - dx
positive = dx > 0
tol = dp(2)
diff = self.to_state(dx)
if drag_type == 'center':
if self.low_handle <= start <= self.high_handle:
if positive:
diff = min(diff, self.max - self.high_val)
self.high_val += diff # right side should move first
self.low_val += diff
else:
diff = max(diff, self.min - self.low_val)
self.low_val += diff # left side should move first
self.high_val += diff
return True
is_low = self.low_handle - tol <= start <= self.low_handle + tol
is_high = self.high_handle - tol <= start <= self.high_handle + tol
if is_low and is_high:
if self.low_handle == self.high_handle:
if positive:
is_low = False
else:
is_high = False
else:
if fabs(self.low_handle - start) <= \
fabs(self.high_handle - start):
is_high = False
else:
is_low = False
if is_low:
self.low_val = min(
max(self.min, self.low_val + diff), self.high_val)
else:
self.high_val = min(
max(self.low_val, self.high_val + diff), self.max)
class FlatSlider(Slider):
__events__ = ('on_release', )
def on_release(self, *largs):
pass
def on_touch_up(self, touch):
if super(FlatSlider, self).on_touch_up(touch):
if touch.grab_current == self:
self.dispatch('on_release', self)
return True
Factory.register('AutoSizedSpinnerBehavior', cls=AutoSizedSpinnerBehavior)
Factory.register('SpinnerBehavior', cls=SpinnerBehavior)
Factory.register('EventFocusBehavior', cls=EventFocusBehavior)
Factory.register('ErrorIndicatorBehavior', cls=ErrorIndicatorBehavior)
Factory.register('HighightButtonBehavior', cls=HighightButtonBehavior)
| [
"[email protected]"
] | |
1694499f38c5a7460bdbbd2db70b473fc4b3672a | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/Lambda-Resource-Static-Assets/2-resources/BLOG/Data-Structures/1-Python/sort/bubble_sort.py | eab30b253fea793d1b3452317c7e81488330911d | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 770 | py | """
https://en.wikipedia.org/wiki/Bubble_sort
Worst-case performance: O(N^2)
If you call bubble_sort(arr,True), you can see the process of the sort
Default is simulation = False
"""
def bubble_sort(arr, simulation=False):
def swap(i, j):
arr[i], arr[j] = arr[j], arr[i]
n = len(arr)
swapped = True
iteration = 0
if simulation:
print("iteration", iteration, ":", *arr)
x = -1
while swapped:
swapped = False
x = x + 1
for i in range(1, n - x):
if arr[i - 1] > arr[i]:
swap(i - 1, i)
swapped = True
if simulation:
iteration = iteration + 1
print("iteration", iteration, ":", *arr)
return arr
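# A small usage example (not part of the original file): sorting a sample list
# with simulation enabled prints the array after each iteration.
if __name__ == "__main__":
    print(bubble_sort([5, 1, 4, 2, 8], simulation=True))  # -> [1, 2, 4, 5, 8]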
| [
"[email protected]"
] | |
9696ef11bc4fc57ad2c5358083c52b57eba4a87c | 3e7a8c5630de986a4d02011b1bd368c041f3f477 | /pytorch/pytorch实现多层感知机对手写数字分类.py | 9bc6523f288118ada524b6e1da6d7b78f30ab5bd | [] | no_license | gswyhq/hello-world | b9ef715f80d2b39c8efaa1aa2eb18a6257e26218 | b1ab053a05e1f8c63b8b04d6904a3cdca450bd9f | refs/heads/master | 2023-05-26T13:15:36.788620 | 2023-05-19T13:38:50 | 2023-05-19T13:38:50 | 158,821,148 | 16 | 6 | null | 2021-03-19T02:59:48 | 2018-11-23T11:04:43 | Python | UTF-8 | Python | false | false | 4,428 | py | #!/usr/bin/env python
# coding: utf-8
# # 3.10 Concise implementation of a multilayer perceptron
# In[ ]:
import torch
from torch import nn
from torch.nn import init
import numpy as np
import sys
import torchvision
print(torch.__version__)
# ## 3.10.1 Define the model
# In[11]:
num_inputs, num_outputs, num_hiddens = 784, 10, 256
class FlattenLayer(torch.nn.Module):
def __init__(self):
super(FlattenLayer, self).__init__()
def forward(self, x): # x shape: (batch, *, *, ...)
return x.view(x.shape[0], -1)
net = nn.Sequential(
FlattenLayer(),
nn.Linear(num_inputs, num_hiddens),
nn.ReLU(),
nn.Linear(num_hiddens, num_outputs),
)
for params in net.parameters():
init.normal_(params, mean=0, std=0.01)
# ## 3.10.2 Read the data and train the model
# In[12]:
batch_size = 256
def load_data_fashion_mnist(batch_size, resize=None, root='~/Datasets/FashionMNIST'):
"""Download the fashion mnist dataset and then load into memory."""
trans = []
if resize:
trans.append(torchvision.transforms.Resize(size=resize))
trans.append(torchvision.transforms.ToTensor())
transform = torchvision.transforms.Compose(trans)
mnist_train = torchvision.datasets.FashionMNIST(root=root, train=True, download=True, transform=transform)
mnist_test = torchvision.datasets.FashionMNIST(root=root, train=False, download=True, transform=transform)
if sys.platform.startswith('win'):
        num_workers = 0  # 0 means no extra worker processes are used to speed up data loading
else:
num_workers = 4
train_iter = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=num_workers)
test_iter = torch.utils.data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=num_workers)
return train_iter, test_iter
def evaluate_accuracy(data_iter, net, device=None):
if device is None and isinstance(net, torch.nn.Module):
        # if no device is specified, use the device of net's parameters
device = list(net.parameters())[0].device
acc_sum, n = 0.0, 0
with torch.no_grad():
for X, y in data_iter:
if isinstance(net, torch.nn.Module):
                net.eval()  # evaluation mode; this disables dropout
acc_sum += (net(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                net.train()  # switch back to training mode
            else:  # custom models (not used after section 3.13); GPU not considered
                if('is_training' in net.__code__.co_varnames):  # if the function has an is_training argument
                    # set is_training to False
acc_sum += (net(X, is_training=False).argmax(dim=1) == y).float().sum().item()
else:
acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
n += y.shape[0]
return acc_sum / n
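# train_ch3 below falls back to a plain mini-batch SGD step when no optimizer is
# passed in. The original notebook imports that helper from d2lzh_pytorch; a
# minimal equivalent (an assumption matching the textbook's definition) is:
def sgd(params, lr, batch_size):
    # update each parameter in place from its accumulated gradient
    for param in params:
        param.data -= lr * param.grad / batch_size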
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
params=None, lr=None, optimizer=None):
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
for X, y in train_iter:
y_hat = net(X)
l = loss(y_hat, y).sum()
            # zero the gradients
if optimizer is not None:
optimizer.zero_grad()
elif params is not None and params[0].grad is not None:
for param in params:
param.grad.data.zero_()
l.backward()
if optimizer is None:
sgd(params, lr, batch_size)
else:
                optimizer.step()  # used in the "concise implementation of softmax regression" section
train_l_sum += l.item()
train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
n += y.shape[0]
test_acc = evaluate_accuracy(test_iter, net)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
train_iter, test_iter = load_data_fashion_mnist(batch_size)
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=0.5)
num_epochs = 5
train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size, None, None, optimizer)
# Source: https://github.com/ShusenTang/Dive-into-DL-PyTorch/blob/master/code/chapter03_DL-basics/3.10_mlp-pytorch.ipynb
| [
"[email protected]"
] | |
d40cbd78c22105244ca33f02a4478fe8bbf16590 | a8720518ad514ed4ce893afc43576b6d44ad80b1 | /homepage/core/admin.py | 42e968b08de540fbcf2531c8f353b64cdaf5e2c2 | [] | no_license | AlecAivazis/homepage-old | 39d7b08219a1aa1341af8a1ce8ae17dab136ea7d | c48abea73d7118455ac207058cdf0f9d00352877 | refs/heads/master | 2023-03-12T14:13:44.321900 | 2015-10-28T23:44:43 | 2015-10-28T23:44:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 357 | py | # -*- Python -*-
# -*- coding: utf-8 -*-
#
# alec aivazis
#
# this file describes the base administration for homepage
# homepage imports
from .models import Project, ProjectScreenshot
# import the django admin
from django.contrib import admin
# register the base models
admin.site.register(Project)
admin.site.register(ProjectScreenshot)
# end of file
| [
"[email protected]"
] | |
b683a8b084d250943a04f7b80d5cb9fa65abfa8c | ee974d693ca4c4156121f8cb385328b52eaac07c | /env/lib/python3.6/site-packages/imgaug/augmenters/contrast.py | 73170bb37c584e7b4de84c07db04386b23c4f881 | [] | no_license | ngonhi/Attendance_Check_System_with_Face_Recognition | f4531cc4dee565d0e45c02217f73f3eda412b414 | 92ff88cbc0c740ad48e149033efd38137c9be88d | refs/heads/main | 2023-03-12T07:03:25.302649 | 2021-02-26T15:37:33 | 2021-02-26T15:37:33 | 341,493,686 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | version https://git-lfs.github.com/spec/v1
oid sha256:6724a23ca8693f52610258fd36edf644480787d709d4351080b5513adc338d47
size 57678
| [
"Nqk180998!"
] | Nqk180998! |
1c65d2f8b68e1df88765b82bf73aa337b70d5bf6 | f576f0ea3725d54bd2551883901b25b863fe6688 | /sdk/networkcloud/azure-mgmt-networkcloud/generated_samples/agent_pools_create.py | e4702cfc48a1642bf33c4566afcd5f9eb16bf7eb | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | Azure/azure-sdk-for-python | 02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c | c2ca191e736bb06bfbbbc9493e8325763ba990bb | refs/heads/main | 2023-09-06T09:30:13.135012 | 2023-09-06T01:08:06 | 2023-09-06T01:08:06 | 4,127,088 | 4,046 | 2,755 | MIT | 2023-09-14T21:48:49 | 2012-04-24T16:46:12 | Python | UTF-8 | Python | false | false | 4,299 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from azure.identity import DefaultAzureCredential
from azure.mgmt.networkcloud import NetworkCloudMgmtClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-networkcloud
# USAGE
python agent_pools_create.py
Before run the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = NetworkCloudMgmtClient(
credential=DefaultAzureCredential(),
subscription_id="123e4567-e89b-12d3-a456-426655440000",
)
response = client.agent_pools.begin_create_or_update(
resource_group_name="resourceGroupName",
kubernetes_cluster_name="kubernetesClusterName",
agent_pool_name="agentPoolName",
agent_pool_parameters={
"extendedLocation": {
"name": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.ExtendedLocation/customLocations/clusterExtendedLocationName",
"type": "CustomLocation",
},
"location": "location",
"properties": {
"administratorConfiguration": {
"adminUsername": "azure",
"sshPublicKeys": [
{
"keyData": "ssh-rsa AAtsE3njSONzDYRIZv/WLjVuMfrUSByHp+jfaaOLHTIIB4fJvo6dQUZxE20w2iDHV3tEkmnTo84eba97VMueQD6OzJPEyWZMRpz8UYWOd0IXeRqiFu1lawNblZhwNT/ojNZfpB3af/YDzwQCZgTcTRyNNhL4o/blKUmug0daSsSXISTRnIDpcf5qytjs1Xo+yYyJMvzLL59mhAyb3p/cD+Y3/s3WhAx+l0XOKpzXnblrv9d3q4c2tWmm/SyFqthaqd0= admin@vm"
}
],
},
"agentOptions": {"hugepagesCount": 96, "hugepagesSize": "1G"},
"attachedNetworkConfiguration": {
"l2Networks": [
{
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/l2Networks/l2NetworkName",
"pluginType": "DPDK",
}
],
"l3Networks": [
{
"ipamEnabled": "False",
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/l3Networks/l3NetworkName",
"pluginType": "SRIOV",
}
],
"trunkedNetworks": [
{
"networkId": "/subscriptions/123e4567-e89b-12d3-a456-426655440000/resourceGroups/resourceGroupName/providers/Microsoft.NetworkCloud/trunkedNetworks/trunkedNetworkName",
"pluginType": "MACVLAN",
}
],
},
"availabilityZones": ["1", "2", "3"],
"count": 3,
"labels": [{"key": "kubernetes.label", "value": "true"}],
"mode": "System",
"taints": [{"key": "kubernetes.taint", "value": "true"}],
"upgradeSettings": {"maxSurge": "1"},
"vmSkuName": "NC_M16_v1",
},
"tags": {"key1": "myvalue1", "key2": "myvalue2"},
},
).result()
print(response)
# x-ms-original-file: specification/networkcloud/resource-manager/Microsoft.NetworkCloud/stable/2023-07-01/examples/AgentPools_Create.json
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
b92c719c838c44f7a9aab607d59e87dbb6da351d | e97060ebb056b8c037e9cf95be08158ecab321bc | /ibmsecurity/isds/interfaces.py | f6af6a062b6ec84043d1ca491d2a39cc3363fcb0 | [
"Apache-2.0"
] | permissive | sandermey/ibmsecurity | 74ed8378e9ddb9f778b76d227e90cfb747511c1e | 92ba7828260e96a6a323f4ac3830bfa43ee8dd7e | refs/heads/master | 2020-04-09T22:49:06.302901 | 2018-03-07T05:04:37 | 2018-03-07T05:04:37 | 124,246,868 | 0 | 0 | Apache-2.0 | 2018-03-07T14:21:29 | 2018-03-07T14:21:28 | null | UTF-8 | Python | false | false | 1,052 | py | import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
def get_all(isdsAppliance, check_mode=False, force=False):
"""
Retrieving all interfaces
:rtype: (str, dict)
"""
return isdsAppliance.invoke_get("Retrieving all interfaces", "/widgets/mgmtinterface")
def get_all_app(isdsAppliance, check_mode=False, force=False):
"""
Retrieving all application interfaces
:rtype: (str, dict)
"""
return isdsAppliance.invoke_get("Retrieving all application interfaces", "/application_interfaces")
def get(isdsAppliance, uuid, check_mode=False, force=False):
"""
Retrieving a single interface
"""
return isdsAppliance.invoke_get("Retrieving a single interface", "/application_interfaces/" + uuid + "/addresses/1")
def compare(isdsAppliance1, isdsAppliance2):
"""
Compare interfaces between 2 appliances
"""
ret_obj1 = get_all(isdsAppliance1)
ret_obj2 = get_all(isdsAppliance2)
return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2)
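# A heavily hedged usage sketch (hostname, credentials and the appliance/user
# class paths are assumptions about the wider ibmsecurity package, not shown here):
# from ibmsecurity.appliance.isdsappliance import ISDSAppliance
# from ibmsecurity.user.applianceuser import ApplianceUser
# isds = ISDSAppliance(hostname='isds.example.com', user=ApplianceUser(username='admin', password='secret'), lmi_port=443)
# print(get_all(isds))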
| [
"[email protected]"
] | |
2eab281a1db78a8b6420122afd404fe0b0a12c37 | 568fa58296378fa129ab3349adf010daa44ed45b | /third_party/incubator-tvm/python/tvm/contrib/miopen.py | e062ac1e735ecdca9a09778d0aaef388d7a78837 | [
"Zlib",
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"BSD-2-Clause",
"BSD-3-Clause",
"NCSA",
"X11-distribute-modifications-variant",
"Unlicense",
"LLVM-exception"
] | permissive | mindspore-ai/akg | 37f471badc66de6a831f1f45ad84344f34d23ef2 | 99f33858d6972741748cbfc9ab0bf9600428fef7 | refs/heads/master | 2023-07-25T23:03:17.672665 | 2023-07-11T07:33:57 | 2023-07-11T07:33:57 | 274,077,856 | 319 | 36 | Apache-2.0 | 2021-12-30T13:43:08 | 2020-06-22T08:09:05 | Python | UTF-8 | Python | false | false | 3,805 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""External function interface to MIOpen library."""
# pylint: disable-msg=C0103
import ctypes
import numpy as np
from .. import api as _api
from .. import intrin as _intrin
from .. import get_global_func as _get_global_func
def _get_np_int32_array_handle(arr):
"""Return a void_p handle for a numpy array
Parameters
----------
arr: numpy.NDArray
source numpy array
Returns
-------
ptr: ctypes.c_void_p
pointer to the data
"""
assert arr.dtype == np.int32
ptr = arr.ctypes.data_as(ctypes.POINTER(ctypes.c_int32))
return ctypes.cast(ptr, ctypes.c_void_p)
def conv2d_forward(x,
w,
stride_h=1,
stride_w=1,
pad_h=0,
pad_w=0,
dilation_h=1,
dilation_w=1,
conv_mode=0,
data_type=1,
group_count=1):
"""Create an extern op that compute 2D convolution with MIOpen
Parameters
----------
x: Tensor
input feature map
w: Tensor
convolution weight
stride_h: int
height stride
stride_w: int
width stride
pad_h: int
height pad
pad_w: int
weight pad
dilation_h: int
height dilation
dilation_w: int
width dilation
conv_mode: int
0: miopenConvolution
1: miopenTranspose
data_type: int
0: miopenHalf (fp16)
1: miopenFloat (fp32)
group_count: int
number of groups
Returns
-------
y: Tensor
The result tensor
"""
assert (0 <= conv_mode <= 2), "0: miopenConvolution / 1: miopenTranspose / 2: miopenGroupConv"
if group_count > 1:
conv_mode = 2
oshape = np.zeros((len(x.shape)), dtype=np.int32)
xshape = x.shape
wshape = w.shape
setup_func = _get_global_func("tvm.contrib.miopen.conv2d.setup")
algo = setup_func(conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
xshape[0].value,
xshape[1].value,
xshape[2].value,
xshape[3].value,
wshape[0].value,
wshape[1].value,
wshape[2].value,
wshape[3].value,
group_count,
_get_np_int32_array_handle(oshape))
return _api.extern(
list(oshape), [x, w],
lambda ins, outs: _intrin.call_packed(
"tvm.contrib.miopen.conv2d.forward",
conv_mode,
data_type,
pad_h,
pad_w,
stride_h,
stride_w,
dilation_h,
dilation_w,
algo,
ins[0],
ins[1],
outs[0]), name="y")
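# A hedged usage sketch (not part of the original module; assumes a ROCm-enabled
# TVM build where tvm.placeholder and this contrib module are available):
# import tvm
# from tvm.contrib import miopen
# x = tvm.placeholder((1, 64, 56, 56), name="x")
# w = tvm.placeholder((64, 64, 3, 3), name="w")
# y = miopen.conv2d_forward(x, w, stride_h=1, stride_w=1, pad_h=1, pad_w=1)
# s = tvm.create_schedule(y.op)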
| [
"[email protected]"
] | |
ef9da3bac625f676f56fdc1d3a6ff80c1630d9da | 62e58c051128baef9452e7e0eb0b5a83367add26 | /x12/4060/224004060.py | 0cfd110dffd480626cfac7c195c4d02697106c2d | [] | no_license | dougvanhorn/bots-grammars | 2eb6c0a6b5231c14a6faf194b932aa614809076c | 09db18d9d9bd9d92cefbf00f1c0de1c590fe3d0d | refs/heads/master | 2021-05-16T12:55:58.022904 | 2019-05-17T15:22:23 | 2019-05-17T15:22:23 | 105,274,633 | 0 | 0 | null | 2017-09-29T13:21:21 | 2017-09-29T13:21:21 | null | UTF-8 | Python | false | false | 416 | py | from bots.botsconfig import *
from records004060 import recorddefs
syntax = {
'version' : '00403', #version of ISA to send
'functionalgroup' : 'MA',
}
structure = [
{ID: 'ST', MIN: 1, MAX: 1, LEVEL: [
{ID: 'CF1', MIN: 1, MAX: 1},
{ID: 'CF2', MIN: 1, MAX: 9999, LEVEL: [
{ID: 'L11', MIN: 0, MAX: 99},
]},
{ID: 'SE', MIN: 1, MAX: 1},
]}
]
| [
"[email protected]"
] | |
cfd2f57ade3b18556861fc73ac6feeca514adc38 | e4d39c514c8f555a706d4d293b91e253a23614aa | /manage.py | 572ea681f23fba520e85784690e8183a78c12971 | [] | no_license | shubhamjain31/AskForum | ddda79528026ed027e2268b99cfc5cb9ea412c54 | f234f9ad1fb8d55520ced6987b1aec5d74fc7c08 | refs/heads/main | 2023-03-10T21:41:25.559793 | 2021-02-26T16:52:58 | 2021-02-26T16:52:58 | 309,034,117 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 664 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'AskForum.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
50ef32c94d5e5aee734232dd3e61fc70bf587629 | 8773e8c9b9a0a6e407f91b6f7c6321141d7e8356 | /P0113.py | 0b8fdc5ce3434443cc3611041a335ba30d4bcad7 | [] | no_license | westgate458/LeetCode | 1836bb21e8dd95386ccab390f5fd04567a429a02 | 36d7f9e967a62db77622e0888f61999d7f37579a | refs/heads/master | 2021-12-28T04:16:36.875737 | 2021-12-17T05:48:09 | 2021-12-17T05:48:09 | 152,928,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,231 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 3 13:23:08 2019
@author: Tianqi Guo
"""
class Solution(object):
def pathSum(self, root, sum):
"""
:type root: TreeNode
:type sum: int
:rtype: List[List[int]]
"""
# deal with trivial case
if not root:
return []
# if current node is a leaf
elif not root.left and not root.right:
# if remaining sum is equal to current value
if sum == root.val:
# current leaf is part of the desired path
return [[root.val]]
# if current leaf doesn't give the desired sum
else:
# return empty list
return []
# if current node is not a leaf
else:
# 1) recursively check the child branches, which return lists of values that add up to the desired sum
# 2) for the combined lists, add the value current node to each entry
# 3) return the updated list of path values to previous level
return [[root.val] + combo for combo in self.pathSum(root.left, sum - root.val) + self.pathSum(root.right, sum - root.val)] | [
"[email protected]"
] | |
f4440770a165003e26a1fe82ab270ff926180faa | b822fd48d109c59a07cfef5196888c3f22c792b3 | /aae/train.py | 0c434501c97952da300c1a76736c15a79a31d1cb | [
"MIT"
] | permissive | cupOJoseph/drawlikebobross | 2e179f24bc59303be2782d95880235c57995a460 | e4c33745c605d17ea6b9e5bea3cf339eb875a58a | refs/heads/master | 2022-05-05T12:18:45.504161 | 2018-07-05T02:21:40 | 2018-07-05T02:21:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,237 | py | import os
import sys
import argparse
import torch
import torchvision.transforms as transforms
sys.path.append(os.path.dirname(__file__))
from trainer import gan_trainer
from loader import BobRossDataset
# Params
parser = argparse.ArgumentParser(description='GAN trainer')
parser.add_argument('--epoch', default=500, type=int)
parser.add_argument('--lr', default=0.001, type=float)
parser.add_argument('--cuda', default='true', type=str)
parser.add_argument('--resume', default='', type=str)
args, unknown = parser.parse_known_args()
cuda = True if 'true' in args.cuda.lower() else False
# cuda = True
transformers = transforms.Compose([
transforms.ToTensor(),
])
# Gan trainer
trainer = gan_trainer(z_dim=8, h_dim=128, filter_num=64, channel_num=3, lr=args.lr, cuda=cuda)
if __name__ == '__main__':
if args.resume:
trainer.load_(args.resume)
# dataset
train_dataset = BobRossDataset('../dataset/bobross.h5py', transform=transformers)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, batch_size=8, shuffle=True,
pin_memory=cuda, num_workers=4
)
for e in range(trainer.start_epoch, args.epoch):
trainer.train(train_loader, e)
trainer.save_(e)
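# Typical invocations (an assumption inferred from the argparse flags above):
#   python train.py --epoch 500 --lr 1e-3 --cuda true
#   python train.py --resume saved_checkpoint --cuda false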
| [
"[email protected]"
] | |
75302cb9e4d1e3d5f8bf29b9814776112b3b0823 | ab0f2794733a129d4073542a1c9315f95f1b7ca8 | /hello.py | ddddd1c76ee9aebeeffa87a41cc017b685828c26 | [] | no_license | krishna-prasath/guvi | d0fbe631e932888ba426f77ba4feaa32d4e66781 | 98fd72ffa03e5d8e7a9fe68989924b690cd92c1b | refs/heads/master | 2020-04-15T04:59:38.243149 | 2019-06-12T19:32:59 | 2019-06-12T19:32:59 | 164,404,997 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | a=int(input())
if a==0:
print(" ")
else:
print("Hello"*a)
| [
"[email protected]"
] | |
faa0232a40c211a3852add071f93ba865508361c | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part002186.py | 828488582f050dee16fc5a1431eafefa81c4dca2 | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher14833(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.4.1.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.2.1.4.1.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher14833._instance is None:
CommutativeMatcher14833._instance = CommutativeMatcher14833()
return CommutativeMatcher14833._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 14832
return
yield
from collections import deque | [
"[email protected]"
] | |
67c3f9ec9c939f9d356a5fd38f10e6df68ba4e5e | 1d928c3f90d4a0a9a3919a804597aa0a4aab19a3 | /python/celery/2016/4/test_filesystem.py | e96039d394100b3efa36fe17417d2f22b8d73151 | [] | no_license | rosoareslv/SED99 | d8b2ff5811e7f0ffc59be066a5a0349a92cbb845 | a062c118f12b93172e31e8ca115ce3f871b64461 | refs/heads/main | 2023-02-22T21:59:02.703005 | 2021-01-28T19:40:51 | 2021-01-28T19:40:51 | 306,497,459 | 1 | 1 | null | 2020-11-24T20:56:18 | 2020-10-23T01:18:07 | null | UTF-8 | Python | false | false | 2,471 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import os
import shutil
import tempfile
from celery import states
from celery.backends.filesystem import FilesystemBackend
from celery.exceptions import ImproperlyConfigured
from celery.utils import uuid
from celery.tests.case import AppCase, skip
@skip.if_win32()
class test_FilesystemBackend(AppCase):
def setup(self):
self.directory = tempfile.mkdtemp()
self.url = 'file://' + self.directory
self.path = self.directory.encode('ascii')
def teardown(self):
shutil.rmtree(self.directory)
def test_a_path_is_required(self):
with self.assertRaises(ImproperlyConfigured):
FilesystemBackend(app=self.app)
def test_a_path_in_url(self):
tb = FilesystemBackend(app=self.app, url=self.url)
self.assertEqual(tb.path, self.path)
def test_path_is_incorrect(self):
with self.assertRaises(ImproperlyConfigured):
FilesystemBackend(app=self.app, url=self.url + '-incorrect')
def test_missing_task_is_PENDING(self):
tb = FilesystemBackend(app=self.app, url=self.url)
self.assertEqual(tb.get_state('xxx-does-not-exist'), states.PENDING)
def test_mark_as_done_writes_file(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tb.mark_as_done(uuid(), 42)
self.assertEqual(len(os.listdir(self.directory)), 1)
def test_done_task_is_SUCCESS(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, 42)
self.assertEqual(tb.get_state(tid), states.SUCCESS)
def test_correct_result(self):
data = {'foo': 'bar'}
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, data)
self.assertEqual(tb.get_result(tid), data)
def test_get_many(self):
data = {uuid(): 'foo', uuid(): 'bar', uuid(): 'baz'}
tb = FilesystemBackend(app=self.app, url=self.url)
for key, value in data.items():
tb.mark_as_done(key, value)
for key, result in tb.get_many(data.keys()):
self.assertEqual(result['result'], data[key])
def test_forget_deletes_file(self):
tb = FilesystemBackend(app=self.app, url=self.url)
tid = uuid()
tb.mark_as_done(tid, 42)
tb.forget(tid)
self.assertEqual(len(os.listdir(self.directory)), 0)
| [
"[email protected]"
] | |
37ec8231587eda9a2df9ede1cf5ec2282956d8c1 | d4d3b859f136154427c36924f87525590853873a | /Tools.py | b104a92fdf8abdde26ffeaa6ddae63c1d381c369 | [] | no_license | lijiunderstand/Semantic_Segmentation_RefineNet | 46b002f53254d5cc0bb4b9565382d2386a1d01c9 | ec7ea477096dafc2052fa74fdb3277199251a35f | refs/heads/master | 2020-04-18T10:16:16.303654 | 2018-07-11T16:41:18 | 2018-07-11T16:41:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,346 | py | import time
import os
from tensorflow.python import pywrap_tensorflow
import numpy as np
from matplotlib import pyplot as plt
import cv2
class Tools:
def __init__(self):
pass
@staticmethod
def print_info(info):
print(time.strftime("%H:%M:%S", time.localtime()), info)
pass
    # create the directory if it does not exist
@staticmethod
def new_dir(path):
if not os.path.exists(path):
os.makedirs(path)
return path
@staticmethod
def print_ckpt(ckpt_path):
reader = pywrap_tensorflow.NewCheckpointReader(ckpt_path)
var_to_shape_map = reader.get_variable_to_shape_map()
for key in var_to_shape_map:
print("tensor_name: ", key)
print(reader.get_tensor(key))
pass
pass
pass
class Visualize:
@staticmethod
def _discrete_matshow_adaptive(data, labels_names=[], title=""):
fig_size = [7, 6]
plt.rcParams["figure.figsize"] = fig_size
cmap = plt.get_cmap('Paired', np.max(data) - np.min(data) + 1)
mat = plt.matshow(data,
cmap=cmap,
vmin=np.min(data) - .5,
vmax=np.max(data) + .5)
cax = plt.colorbar(mat,
ticks=np.arange(np.min(data), np.max(data) + 1))
if labels_names:
cax.ax.set_yticklabels(labels_names)
if title:
plt.suptitle(title, fontsize=15, fontweight='bold')
fig = plt.gcf()
fig.savefig('data/tmp.jpg', dpi=300)
img = cv2.imread('data/tmp.jpg')
return img
@staticmethod
def visualize_segmentation_adaptive(predictions, segmentation_class_lut, title="Segmentation"):
# TODO: add non-adaptive visualization function, where the colorbar
# will be constant with names
unique_classes, relabeled_image = np.unique(predictions, return_inverse=True)
relabeled_image = relabeled_image.reshape(predictions.shape)
labels_names = []
for index, current_class_number in enumerate(unique_classes):
labels_names.append(str(index) + ' ' + segmentation_class_lut[current_class_number])
im = Visualize._discrete_matshow_adaptive(data=relabeled_image, labels_names=labels_names, title=title)
return im
pass
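# A hedged usage sketch (hypothetical class lookup table; not part of the module):
# preds = np.array([[0, 0, 1], [1, 2, 2]])
# lut = {0: 'background', 1: 'road', 2: 'car'}
# overview = Visualize.visualize_segmentation_adaptive(preds, lut, title="demo")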
| [
"[email protected]"
] | |
428f590f1df538a492bd7a601fcb55bf5ce4ee3b | 81fff1188c6918fbe7ccbcd9e61b62456f0abef5 | /backend/settings.py | 4531b84a2b3ef8fe367cd5479e52c1a85a274c8d | [
"MIT"
] | permissive | mugash/cookbook-graphql-backend | 2742087f3e6c4012f5c99c17c0518c27a8b30078 | 116e9dc2e5b0d63a2e4429a5c6f192cd0c43508d | refs/heads/master | 2020-12-02T16:13:25.043539 | 2017-07-07T09:02:42 | 2017-07-07T09:02:42 | 96,519,903 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,205 | py | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.11.3.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ng5ptohji7_9@u(e&az$ljy4(#ai+tj#dcj-hg92wdrjxdpcx6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'graphene_django',
'ingredients'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
GRAPHENE = {
'SCHEMA': 'backend.schema.schema'
}
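# GRAPHENE['SCHEMA'] above points at backend/schema.py, which is not shown here.
# A minimal sketch of what that module usually contains in a graphene-django
# cookbook project (an assumption, not the repository's actual file):
# import graphene
# import ingredients.schema
# class Query(ingredients.schema.Query, graphene.ObjectType):
#     pass
# schema = graphene.Schema(query=Query)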
| [
"[email protected]"
] | |
9d12a804190337836e997d0e7f88d8a22da76e8b | 84147502b81451a2f9bcaabc00a35789afe132f0 | /fastapistudy/test_chapter08.py | abf3208c284c9c77487e9a7b074f1a6fe1c9b59a | [] | no_license | teng-tt/Fastapi_Study | bfdb3ca9f97cf8e2a928f56a77d0fc17c5bb9692 | 946decd07b0de98ce353d4008c7920c778a94a6f | refs/heads/master | 2023-06-01T14:57:43.811721 | 2021-06-13T03:50:55 | 2021-06-13T03:50:55 | 360,474,922 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 878 | py | # !/usr/bin/python3
# -*- coding:utf-8 -*-
# __author__ = "Teng"
from fastapi.testclient import TestClient
from run import app
""" 测试用例 """
client = TestClient(app)  # install pytest first
def test_run_bg_task():  # note: not an async def; function names starting with test_ follow the pytest convention
response = client.post(url="/chapter08/background_tasks?framework=FastApi")
assert response.status_code == 200
assert response.json() == {"message": "任务已在后台运行"}
def test_dependency_run_bg_task():
response = client.post(url="/chapter08/dependency/background_tasks")
assert response.status_code == 200
assert response.json() is None
def test_dependency_run_bg_task_q():
response = client.post(url="/chapter08/dependency/background_tasks?q=1")
assert response.status_code == 200
assert response.json() == {"message": "README.md更新成功"} | [
"[email protected]"
] | |
6127053660627a2dde6c74165c90c823c64c299b | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_04_01/aio/operations/_ddos_custom_policies_operations.py | eac7c4ae02f97170fa3e3be18ebca8027cc97483 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 22,058 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class DdosCustomPoliciesOperations:
"""DdosCustomPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
**kwargs
) -> "models.DdosCustomPolicy":
"""Gets information about the specified DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DdosCustomPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.DdosCustomPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "models.DdosCustomPolicy",
**kwargs
) -> "models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DdosCustomPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "models.DdosCustomPolicy",
**kwargs
) -> AsyncLROPoller["models.DdosCustomPolicy"]:
"""Creates or updates a DDoS custom policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the create or update operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.DdosCustomPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.DdosCustomPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'ddosCustomPolicyName': self._serialize.url("ddos_custom_policy_name", ddos_custom_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
ddos_custom_policy_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.DdosCustomPolicy"]:
"""Update a DDoS custom policy tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param ddos_custom_policy_name: The name of the DDoS custom policy.
:type ddos_custom_policy_name: str
:param parameters: Parameters supplied to the update DDoS custom policy resource tags.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either DdosCustomPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.DdosCustomPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DdosCustomPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
ddos_custom_policy_name=ddos_custom_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DdosCustomPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ddosCustomPolicies/{ddosCustomPolicyName}'} # type: ignore
| [
"[email protected]"
] | |
a6e2b69a7dd2c15bf8f960ee53cf86c2fca9e9cd | 9eb35d6df7b0490d556623f84dba12bb05f30ee2 | /models_and_validation/cross_validation.py | 6b2524e4812c1b21e05c66400b87e28df0741375 | [
"MIT"
] | permissive | FelSiq/statistics-related | 0b4442bd19338c5b0da7dcf5ecd53eb304dcd3f8 | ee050202717fc368a3793b195dea03687026eb1f | refs/heads/master | 2021-11-24T12:31:08.660652 | 2021-11-03T23:42:39 | 2021-11-03T23:42:39 | 211,089,869 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,204 | py | """Tests with cross-validation.
General and simple method used for estimating unknown parameters
from data.
General algorithm:
1. Randomly partition the data X of size n into X_{train} and X_{test}
Let m = X_{test}.size
Therefore, X_{train}.size = n - m
2. Fit the model using X_{train}
3. Test the fitted model using X_{test}
4. Repeat t times and average the results
Some of the most known Cross-validation procedures:
k-fold CV: partition the data X into k (approximately) equal-sized
subsets. t = k and m = n/k (tests of every subset once.)
Leave-one-out (LOO) CV: m = 1, t = n, testing on every sample once.
(The same as K-fold CV with k = n).
Monte Carlo CV: randomly sample subsets of suitable size for the
desired number of times.
"""
import typing as t
import numpy as np
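# Worked example of the k-fold split sizes below (illustrative): with n = 10
# samples and k = 3 folds, test_size = 10 // 3 = 3 and the single leftover
# sample is absorbed by the first fold, giving test folds of sizes 4, 3, 3
# (and train folds of 6, 7, 7).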
def kfold_cv(
X: np.ndarray,
k: int = 10,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""K-fold Cross Validation."""
    if not isinstance(k, (int, np.integer)):
raise TypeError("'k' must be an integer (got {}.)".format(type(k)))
if k <= 1:
raise ValueError("'k' must be a greater than 1 (got {}.)".format(k))
n_samples = X.size if X.ndim == 1 else X.shape[0]
if n_samples < max(2, k):
raise ValueError("Insufficient number of instances ({}). "
"Required num_inst >= max(2, k)".format(n_samples))
test_size = int(n_samples / k)
uneven_extra_inds = n_samples - k * test_size
indices = np.arange(n_samples)
if shuffle:
if random_state is not None:
np.random.seed(random_state)
np.random.shuffle(indices)
for _ in np.arange(k):
split_index = test_size + int(uneven_extra_inds > 0)
uneven_extra_inds -= 1
if return_inds:
yield indices[:split_index], indices[split_index:]
else:
yield X[indices[:split_index]], X[indices[split_index:]]
indices = np.roll(indices, -split_index)
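# Minimal usage sketch for kfold_cv (note it yields (test, train) pairs, in
# that order, as in the yield statements above):
#   X = np.arange(10)
#   for X_test, X_train in kfold_cv(X, k=5, random_state=0):
#       ...  # fit on X_train, evaluate on X_test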
def loo_cv(
X: np.ndarray,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""LOOCV (Leave-one-out Cross Validation).
This is the same as n-fold Cross Validation (k = n).
"""
n_samples = X.size if X.ndim == 1 else X.shape[0]
for fold in kfold_cv(
X=X,
k=n_samples,
shuffle=shuffle,
return_inds=return_inds,
random_state=random_state):
yield fold
def jackknife(
X: np.ndarray,
k: int = 0,
shuffle: bool = True,
return_inds: bool = False,
random_state: t.Optional[int] = None,
) -> t.Iterator[np.ndarray]:
"""Jackknife iterator.
The jackknife procedure partitions the ``X`` data into k folds,
and, unlike the Cross Validation procedure, returns just the
`kept/train` examples.
If k <= 0, then k = `number of instances` is used.
"""
n_samples = X.size if X.ndim == 1 else X.shape[0]
k = n_samples if k <= 0 else k
for _, train_vals in kfold_cv(
X=X,
k=k,
shuffle=shuffle,
return_inds=return_inds,
random_state=random_state):
yield train_vals
def monte_carlo_cv(X: np.ndarray,
test_frac: float = 0.2,
n: int = 10,
return_inds: bool = False,
random_state: t.Optional[int] = None
) -> t.Iterator[t.Tuple[np.ndarray, np.ndarray]]:
"""Monte Carlo Cross Validation."""
if not isinstance(test_frac, float):
raise ValueError("'test_frac' must be float type (got {}.)".format(
type(test_frac)))
if not isinstance(n, int):
raise TypeError("'n' must be an integer (got {}.)".format(type(n)))
if n <= 0:
raise ValueError("'n' must be a positive value (got {}.)".format(n))
if not 0 < test_frac < 1:
raise ValueError(
"'test_frac' must be in (0.0, 1.0) interval (got {}.)".format(
test_frac))
n_samples = X.size if X.ndim == 1 else X.shape[0]
if n_samples < 2:
raise ValueError("Number of samples must be greater than 1 "
"(got {}.)".format(n_samples))
test_size = int(test_frac * n_samples)
if test_size == 0:
raise ValueError(
"Test subset with 0 instances. Please choose a higher 'test_frac' (got {}.)"
.format(test_frac))
if random_state is not None:
np.random.seed(random_state)
indices = np.arange(n_samples)
for _ in np.arange(n):
np.random.shuffle(indices)
inds_test, inds_train = np.split(indices, [test_size])
if return_inds:
yield inds_test, inds_train
else:
yield X[inds_test], X[inds_train]
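# Usage sketch for monte_carlo_cv (illustrative): each of the `n` iterations
# yields a freshly shuffled (test, train) pair.
#   for X_test, X_train in monte_carlo_cv(np.arange(100), test_frac=0.2, n=5):
#       ...  # 20 test samples and 80 train samples per iteration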
def _test():
for fold in monte_carlo_cv(np.arange(2), test_frac=0.99, random_state=1):
print(fold)
if __name__ == "__main__":
_test()
| [
"[email protected]"
] | |
abc24fb1a2d224e5ce351a3c7c1e216546bed8fa | e6208febf7e34d4108422c6da54453373733a421 | /sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_gcs_artifact_repository.py | 721344cf443c2cbdacfa8ae5df74a17003b32c37 | [
"Apache-2.0"
] | permissive | wreed4/argo | 05889e5bb7738d534660c58a7ec71c454e6ac9bb | 41f94310b0f7fee1ccd533849bb3af7f1ad4f672 | refs/heads/master | 2023-01-22T05:32:12.254485 | 2022-01-27T21:24:45 | 2022-01-27T22:02:22 | 233,143,964 | 0 | 0 | Apache-2.0 | 2023-01-17T19:04:43 | 2020-01-10T22:56:25 | Go | UTF-8 | Python | false | false | 12,162 | py | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.secret_key_selector import SecretKeySelector
globals()['SecretKeySelector'] = SecretKeySelector
class IoArgoprojWorkflowV1alpha1GCSArtifactRepository(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'bucket': (str,), # noqa: E501
'key_format': (str,), # noqa: E501
'service_account_key_secret': (SecretKeySelector,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'bucket': 'bucket', # noqa: E501
'key_format': 'keyFormat', # noqa: E501
'service_account_key_secret': 'serviceAccountKeySecret', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1GCSArtifactRepository - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bucket (str): Bucket is the name of the bucket. [optional] # noqa: E501
key_format (str): KeyFormat is defines the format of how to store keys. Can reference workflow variables. [optional] # noqa: E501
service_account_key_secret (SecretKeySelector): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1GCSArtifactRepository - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
bucket (str): Bucket is the name of the bucket. [optional] # noqa: E501
key_format (str): KeyFormat is defines the format of how to store keys. Can reference workflow variables. [optional] # noqa: E501
service_account_key_secret (SecretKeySelector): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"[email protected]"
] | |
feb5e81dff37b3e59bb6b25fe4a2ad1dd53ee5f0 | 4591684136ac81244d5337197e97f58864d7fff3 | /keras/keras18_ensemble1.py | afb0ad3b17ce43849797d7ecfda41ba1b2a9b692 | [] | no_license | marattang/AI_training | 4b15e9d9734d77ae04beaae078749c85d832c9c5 | f7f1a2b762dcf770335b62ee668ad1c54ccf1ceb | refs/heads/main | 2023-06-20T19:05:10.385238 | 2021-07-26T00:29:10 | 2021-07-26T00:29:10 | 383,965,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,746 | py | from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
import numpy as np
from tensorflow.keras.layers import concatenate, Concatenate
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
x1 = np.array([range(100), range(301, 401), range(1, 101)])
x2 = np.array([range(101, 201), range(411, 511), range(100, 200)])
x1 = np.transpose(x1)
x2 = np.transpose(x2)
y1 = np.array([range(1001, 1101)])
# y1 = np.array(range(1001, 1101))  # without the outer [] the shape would be (100,)
y1 = np.transpose(y1)
print(x1.shape, x2.shape, y1.shape)
# x1_train, x1_test, x2_train, x2_test, y_train, y_test = train_test_split(x1, x2, y1, test_size=0.2, random_state=8, shuffle=True)
x1_train, x1_test, x2_train, x2_test, y_train, y_test = train_test_split(x1, x2, y1, random_state=8, shuffle=True)
print(x1_train.shape, x2_train.shape, y_train.shape)
print(x1_test.shape, x2_test.shape, y_test.shape)
# Model construction
# Exercise
# 2-1 Model 1
input1 = Input(shape=(3,))
dense1 = Dense(55, activation='relu', name='dense1')(input1)
dense2 = Dense(32, activation='relu', name='dense2')(dense1)
dense3 = Dense(26, activation='relu', name='dense3')(dense2)
output1 = Dense(18)(dense3)
# 2-2 Model 2
input2 = Input(shape=(3,))
dense11 = Dense(45, activation='relu', name='dense11')(input2)
dense12 = Dense(28, activation='relu', name='dense12')(dense11)
dense13 = Dense(20, activation='relu', name='dense13')(dense12)
dense14 = Dense(10, activation='relu', name='dense14')(dense13)
output2 = Dense(7)(dense14)
merge1 = concatenate([output1, output2]) # merge the last layer of model 1 with the last layer of model 2.
# Task 4. Implement the same merge using the Concatenate layer class
merge1 = Concatenate(axis=1)([output1, output2])
merge2 = Dense(24)(merge1)
merge3 = Dense(15, activation='relu')(merge2)
last_output = Dense(1)(merge3)
# last_output = Dense(1)(merge1)
model = Model(inputs=[input1, input2], outputs=last_output)
# model.summary()
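# Note on the merge (added): output1 has shape (None, 18) and output2 has shape
# (None, 7), so the concatenated tensor merge1 has shape (None, 25) before it is
# passed through Dense(24) -> Dense(15) -> Dense(1).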
# 3. Compile, train
model.compile(loss = 'mse', optimizer='adam', metrics=['mae']) # metrics=['mae', 'mse']
# metrics is accepted as a list, so two or more metrics can be passed at once.
model.fit([x1_train, x2_train], y_train, epochs=400, batch_size=25, verbose=1, validation_split=0.1)
# 4. Evaluate, predict
result = model.evaluate([x1_test, x2_test], y_test) # evaluate returns the loss followed by the metrics.
print('result : ', result)
y_predict = model.predict([x1_test, x2_test])
r2 = r2_score(y_test, y_predict)
print('r2 score : ', r2)
print('loss : ', result[0])
print('metrics["mae"] : ', result[1])
# r2 score : 0.9914715240776343 -> 0.9997684219501827
# goal: bring the loss below 1 -> 0.20147289335727692
"[email protected]"
] | |
e23cff71c1b73f9b5b94aefde10c99cbf6be3d6d | 66fb1005aaeb25735a1ae9197ab7dd371862bbf2 | /sysadmin_scripts/mongodb_data_model_3/updateDB.py | 4cd67377a565ab4d07f01bf4870f41dc2df64706 | [
"MIT"
] | permissive | jfnavarro/st_misc | 8a8d87df9e059dbd2a037d4267acd4e21593e7c4 | bb8c1f2c4f05343f6dd5cc8b8cd8f405d825bd31 | refs/heads/master | 2021-01-01T17:13:58.540991 | 2017-08-22T13:04:26 | 2017-08-22T13:04:26 | 98,029,163 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,714 | py | #! /usr/bin/env python
"""
Script to convert ST API database model 2 to model 3
@author: Jose Fernandez
"""
import argparse
import os
import sys
try:
from pymongo import MongoClient
from pymongo import errors
from bson.objectid import ObjectId
except ImportError, e:
sys.stderr.write("Pymongo was not found, aborting...\n")
sys.exit(1)
def usage():
print "Usage:"
print " python updateDB.py [options]"
print "Options:"
print " [-a, --user] => username for the MongoDB admin"
print " [-d, --password] => password for the MongoDB admin"
print " [-c, --host] => (default localhost)"
print " [-p, --port] => (default 27017)"
print "Description:"
print " Updates the ST database from data model 2 to data model 3."
print " NOTE: It is a wise idea to manually run mongodump to create a backup of the data state prior to the update!"
def main(user, password, host, port):
print "Connecting to database..."
mongoConnection = 0
try:
mongoConnection = MongoClient(host, port)
except errors.AutoReconnect:
print 'Cannot connect to database. \nPlease manually start up MongoDB.'
sys.exit(1)
print "mongoConnection" , mongoConnection
print "Authorizing..."
try:
db_admin = mongoConnection["admin"]
db_admin.authenticate(user, password)
print "Authorization Ok!"
except TypeError,e:
sys.stderr.write("There was an error in the authentication: " + str(e) + "\n")
sys.exit(1)
###############################################################################################################
db_analysis = mongoConnection["analysis"]
datasets = db_analysis["dataset"]
datasetinfos = db_analysis["datasetinfo"]
imagealignments = db_analysis["imagealignment"]
chips = db_analysis["chip"]
# Remove the experiment database
mongoConnection.drop_database("experiment")
# Remove some fields in analysis.dataset
datasets.update_many({}, {'$unset' : { 'overall_feature_count' : 1}})
datasets.update_many({}, {'$unset' : { 'overall_hit_count' : 1}})
datasets.update_many({}, {'$unset' : { 'unique_barcode_count' : 1}})
datasets.update_many({}, {'$unset' : { 'overall_hit_quartiles' : 1}})
datasets.update_many({}, {'$unset' : { 'gene_pooled_hit_quartiles' : 1}})
datasets.update_many({}, {'$unset' : { 'obo_foundry_terms' : 1}})
# Remove one field in analaysis.dataset
datasetinfos.update_many({}, {'$unset' : { 'comment' : 1}})
# Update the analysis.dataset collection to add the fields from analysis.imagealignment
for ele in datasets.find():
try:
dataset_id = ele["_id"]
al_id = ele["image_alignment_id"]
valid = True
if al_id is None or al_id == "":
valid = False
else:
al = imagealignments.find_one({"_id": ObjectId(al_id)})
if al is None or al == "":
valid = False
if valid:
datasets.update_one({"_id": dataset_id}, {"$set": {"figureHE": al["figure_blue"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"figureCy3": al["figure_red"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"alignmentMatrix": al["alignment_matrix"]}})
datasets.update_one({"_id": dataset_id}, {"$set": {"dataFile": str(dataset_id) + "_stdata.tsv.gz"}})
datasets.update_one({"_id": dataset_id}, {"$set": {"files": []}})
else:
datasets.delete_one({"_id": dataset_id})
except KeyError:
continue
datasets.delete_one({"_id": dataset_id})
# Remove image_alignment_id field from analysis.dataset
datasets.update_many({}, {'$unset' : { 'image_alignment_id' : 1}})
# Remove analysis.imagealignment and analysis.chip
imagealignments.drop()
chips.drop()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-c', '--host', type=str, default="localhost",
help='Address of the host to connect to')
parser.add_argument('-p', '--port', type=int, default=27017,
help='Port of the host to connect to')
parser.add_argument('-a', '--user', required=True, type=str,
help='the user name for the admin of the database')
parser.add_argument('-d', '--password', required=True, type=str,
help='the password for the admin of the database')
args = parser.parse_args()
main(args.user, args.password, args.host, args.port) | [
"[email protected]"
] | |
b0a4eaabb5ac8a0fbcc6c8266ba143827102a7db | 6b98eeaf6eb485e1cc4d56c3eda15b6482f21296 | /app/grandchallenge/evaluation/migrations/0003_config_new_results_are_public.py | d09b3fcc4484ec754dc2e22c6730305c3811c093 | [
"Apache-2.0"
] | permissive | cnbillow/grand-challenge.org | ef2db96c7bc6919aa7ee993d43978f8c3185a71f | de90bd01ca6aa883dcb47c4d005bd15f38549752 | refs/heads/master | 2020-03-28T03:07:41.905924 | 2018-09-06T04:45:57 | 2018-09-06T04:45:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 653 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-03 10:27
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("evaluation", "0002_config_submission_page_html")]
operations = [
migrations.AddField(
model_name="config",
name="new_results_are_public",
field=models.BooleanField(
default=True,
help_text="If true, new results are automatically made public. If false, the challenge administrator must manually publish each new result.",
),
)
]
| [
"[email protected]"
] | |
5ba4cad4c0f578ee5023846bff798403f454451d | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03548/s413867262.py | f0f0fea98a3638b5746c91a4b886907789bfdba3 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | x,y,z = map(int,input().split())
x -= z
r = x // (y + z)
print(r) | [
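# The snippet above appears to compute how many blocks of width y, each separated
# by a gap of width z (plus one leading gap), fit into a total width x,
# i.e. r = (x - z) // (y + z).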
"[email protected]"
] | |
25901eb4746bde899cc2b41588f21da0d665c13b | 3d19e1a316de4d6d96471c64332fff7acfaf1308 | /Users/E/elleryq/haodooscraper.py | 7afbaca75ca4d510bc65104eb77ff5d74af43eac | [] | no_license | BerilBBJ/scraperwiki-scraper-vault | 4e98837ac3b1cc3a3edb01b8954ed00f341c8fcc | 65ea6a943cc348a9caf3782b900b36446f7e137d | refs/heads/master | 2021-12-02T23:55:58.481210 | 2013-09-30T17:02:59 | 2013-09-30T17:02:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,488 | py | # For scrape Haodoo (http://www.haodoo.net)
import scraperwiki
import simplejson
import lxml.html
import sys
from urlparse import parse_qs
from urllib import urlencode
import traceback
base_url = 'http://www.haodoo.net'
def parse_books_from_html( html ):
"""
    Parse the URL of each book from the book-list page.
    Each book's title and URL are stored in the sqlite database provided by scraperwiki.
"""
root = lxml.html.fromstring(html)
for a in root.cssselect("a"):
href = a.attrib['href']
if href.startswith( '?' ):
s = href[1:]
else:
s = href
book_title = a.text_content()
d = parse_qs( s )
if d.has_key('M') and d['M'][0] in ('book', 'Share'):
if d.has_key('P'):
book_id = d['P'][0]
book = { 'id': book_id, 'url': base_url + href, 'title': book_title }
scraperwiki.sqlite.save(unique_keys=["id"], data=book, table_name="bookpages" )
def find_volume_id( onclick ):
"""
Find book id from the given string. The string actually is javascript function.
"""
# find which kind of quote, ' or "
quote = "'"
start = onclick.find( quote )
if start==-1:
quote = '"'
id = ''
start = onclick.find( quote )
end = onclick.rfind( quote )
if start!=-1 and end!=-1:
id = onclick[ start+1:end ]
return id
def convert_to_dl_url( id, ext ):
"""
    Generate the download URL from the given book id and file extension.
"""
result = base_url + "/?" + urlencode( {
"M": "d",
"P": id + "." + ext } )
#print( "__convert_to_dl_url()=%s" % result )
return result
def extract_set_title( html ):
start_pos = html.find( 'SetTitle("' )
if start_pos == -1:
return ("", "")
start_quote = html.find( '"', start_pos )
if start_quote == -1:
return ("", "")
end_quote = html.find( '"', start_quote+1 )
if end_quote == -1:
return ("", "")
set_title = html[ start_quote+1: end_quote-1 ]
set_title = set_title.replace( '《', ',' )
r = set_title.split(',')
if len(r)!=2:
return ("", "" )
return r
def analysis_book_html_and_save( book, html ):
doc = lxml.html.fromstring( html )
volume_author, volume_name = extract_set_title( html )
pdb_download_elements = doc.xpath( '//a[contains(@href, "pdb")]' )
if len(pdb_download_elements):
# old style page, only readonline and download link.
save_item = pdb_download_elements[0]
title = save_item.getprevious().text
author = None
if save_item is not None and save_item.getprevious() and save_item.getprevious().getprevious():
author = save_item.getprevious().getprevious().text
volume = {
'id': book['id'],
'bookid': book['id'],
}
if title:
volume['title'] = title
else:
volume['title'] = volume_name
if author:
volume['author'] = author
else:
volume['author'] = volume_author
scraperwiki.sqlite.save(
unique_keys=["volumeid", "type"],
data={
"volumeid": book['id'],
"type": "pdb",
"link": base_url + "/" + save_item.attrib['href']
},
table_name="volumeexts" )
scraperwiki.sqlite.save(unique_keys=["id"], data=volume, table_name="bookvolumes" )
else:
volume = None
exts = []
for save_item in doc.xpath('//input[contains(@type, "button")]'):
onclick = save_item.get('onclick')
id = find_volume_id( onclick )
skip = False
if "ReadOnline" in onclick or "ReadPdbOnline" in onclick:
if volume is not None:
for ext in exts:
scraperwiki.sqlite.save(unique_keys=["volumeid", "type"], data=ext, table_name="volumeexts" )
scraperwiki.sqlite.save(unique_keys=["id"], data=volume, table_name="bookvolumes" )
volume = {
'id': id,
'author': save_item.getprevious().text,
'title': save_item.getprevious().tail,
'bookid': book['id'],
}
exts = []
elif "DownloadEpub" in onclick:
dl_link = convert_to_dl_url( id, "epub" )
exts.append( { "volumeid": id, "type": "epub", "link": dl_link } )
elif "DownloadUpdb" in onclick:
dl_link = convert_to_dl_url( id, "updb" )
exts.append( { "volumeid": id, "type": "updb", "link": dl_link } )
elif "DownloadPdb" in onclick:
dl_link = convert_to_dl_url( id, "pdb" )
exts.append( { "volumeid": id, "type": "pdb", "link": dl_link } )
if volume:
for ext in exts:
scraperwiki.sqlite.save(unique_keys=["volumeid", "type"], data=ext, table_name="volumeexts" )
scraperwiki.sqlite.save(unique_keys=["id"], data=volume, table_name="bookvolumes" )
#
# Main
#
def main():
urls = [
'http://www.haodoo.net/?M=hd&P=wisdom',
'http://www.haodoo.net/?M=hd&P=history',
'http://www.haodoo.net/?M=hd&P=martial',
'http://www.haodoo.net/?M=hd&P=mystery',
'http://www.haodoo.net/?M=hd&P=romance',
'http://www.haodoo.net/?M=hd&P=scifi',
'http://www.haodoo.net/?M=hd&P=fiction',
]
skip_stage1 = False
try:
print( ">>> Stage 1 - Collecting all book urls <<<" )
if not skip_stage1:
for url in urls:
html = scraperwiki.scrape(url)
page = 1
while True:
suburl = "{0}-{1}".format( url, page )
if html.find( suburl[suburl.find('?'):] ):
html = scraperwiki.scrape( suburl )
if html.find("<strong>404")!=-1:
break
parse_books_from_html( html )
page = page + 1
else:
break
print( ">>> Stage 2 - Analysising all book urls <<<" )
for book in scraperwiki.sqlite.select("* from bookpages"):
# grab html
html = scraperwiki.scrape( book['url'] )
# analysis and store information into book
analysis_book_html_and_save( book, html )
print( ">>> State 3 - done <<<" )
except Exception, e:
print( "Got exception:" )
print( e )
print( traceback.format_exc() )
main()
| [
"[email protected]"
] | |
7291d8ba50828814176f7abb193c7cde1e7ba1c6 | 9d93af3cf4a663fe5e9618061a37d0910c089cea | /tests/test_decompressor_decompressobj.py | 8787afa53b47209b0870bd073017dbb59cd40c88 | [
"BSD-3-Clause"
] | permissive | glandium/python-zstandard | 49bd96daed537169345f8024ead5a4fe599f8b4d | 80c3142f274621d11b1e3c401e17ee4b983ab1a5 | refs/heads/master | 2022-12-07T00:34:01.413940 | 2022-10-29T22:33:35 | 2022-10-29T22:38:08 | 134,373,670 | 0 | 0 | null | 2018-05-22T06:57:46 | 2018-05-22T06:57:46 | null | UTF-8 | Python | false | false | 3,799 | py | import unittest
import zstandard as zstd
class TestDecompressor_decompressobj(unittest.TestCase):
def test_simple(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertFalse(dobj.eof)
self.assertEqual(dobj.decompress(data), b"foobar")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.flush(), b"")
self.assertEqual(dobj.flush(10), b"")
self.assertEqual(dobj.flush(length=100), b"")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
def test_input_types(self):
compressed = zstd.ZstdCompressor(level=1).compress(b"foo")
dctx = zstd.ZstdDecompressor()
mutable_array = bytearray(len(compressed))
mutable_array[:] = compressed
sources = [
memoryview(compressed),
bytearray(compressed),
mutable_array,
]
for source in sources:
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertFalse(dobj.eof)
self.assertEqual(dobj.flush(), b"")
self.assertEqual(dobj.flush(10), b"")
self.assertEqual(dobj.flush(length=100), b"")
self.assertEqual(dobj.decompress(source), b"foo")
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.unconsumed_tail, b"")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.flush(), b"")
def test_unused_data(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
self.assertEqual(dobj.unused_data, b"")
self.assertEqual(dobj.decompress(data + b"extra"), b"foobar")
self.assertTrue(dobj.eof)
self.assertEqual(dobj.unused_data, b"extra")
def test_reuse(self):
data = zstd.ZstdCompressor(level=1).compress(b"foobar")
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
dobj.decompress(data)
with self.assertRaisesRegex(
zstd.ZstdError, "cannot use a decompressobj"
):
dobj.decompress(data)
self.assertEqual(dobj.flush(), b"")
def test_multiple_decompress_calls(self):
expected = b"foobar" * 10
data = zstd.ZstdCompressor(level=1).compress(expected)
N = 3
partitioned_data = [
data[len(data) * i // N : len(data) * (i + 1) // N]
for i in range(N)
]
dctx = zstd.ZstdDecompressor()
dobj = dctx.decompressobj()
for partition in partitioned_data[:-1]:
decompressed = dobj.decompress(partition)
self.assertEqual(decompressed, b"")
self.assertEqual(dobj.unused_data, b"")
decompressed = dobj.decompress(partitioned_data[-1])
self.assertEqual(decompressed, expected)
def test_bad_write_size(self):
dctx = zstd.ZstdDecompressor()
with self.assertRaisesRegex(ValueError, "write_size must be positive"):
dctx.decompressobj(write_size=0)
def test_write_size(self):
source = b"foo" * 64 + b"bar" * 128
data = zstd.ZstdCompressor(level=1).compress(source)
dctx = zstd.ZstdDecompressor()
for i in range(128):
dobj = dctx.decompressobj(write_size=i + 1)
self.assertEqual(dobj.decompress(data), source)
| [
"[email protected]"
] | |
57848684f29088f1594e93d18a9cca0f11cda17c | c8781d3dc17202fcc1b5358475071c0a834c7f82 | /ShowAndSearch/utils/parser.py | fe86a895837ffa7cf0261b804c6bb2395d13278d | [
"Apache-2.0"
] | permissive | guchengxi1994/show-and-search | 7b73d4a7a0250a0f70cf07b0de7695d6c8051545 | e955a6677f3cd23b1f7ed247e828a5852ec6ab20 | refs/heads/master | 2022-12-22T06:28:36.601500 | 2020-09-22T05:17:14 | 2020-09-22T05:17:14 | 295,630,132 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,636 | py | '''
lanhuage: python
Descripttion:
version: beta
Author: xiaoshuyui
Date: 2020-09-15 15:59:10
LastEditors: xiaoshuyui
LastEditTime: 2020-09-22 11:19:20
'''
import argparse
from ShowAndSearch.utils.logger import logger
class BaseParser(object):
def __init__(self, args: list, method: str):
"""
args type:list
arg type:tuple
arg example : ('-f','--force','force to show message even do not contain the module')
"""
self.args = args
self.method = method
self.parser = argparse.ArgumentParser(
description='{} method or module information'.format(self.method))
def get_parser(self):
self.parser.add_argument(
'question', metavar='QUESTION', type=str, nargs='*', help='the question to answer')
self.parser.add_argument(
'-v', '--version', help='show current version', action='store_true')
if len(self.args) > 0:
# self.parser.add_argument('-f','--force',help='force to show message even do not contain the module')
# self.parser.add_argument('-s','--simple',help='show simple message')
for i in self.args:
self.parser.add_argument(
i[0], i[1], help=i[2], action='store_true')
else:
logger.warning('args list is null')
return self.parser
def add_parser(self, arg):
if type(arg) is tuple and len(arg) == 3:
self.parser.add_argument(
arg[0], arg[1], help=arg[2], action='store_true')
else:
logger.error('input error')
return self.parser
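# Minimal usage sketch (illustrative; the argument tuples and method name are
# made up for the example):
#   base = BaseParser([('-f', '--force', 'force to show message')], 'show')
#   parser = base.get_parser()
#   args = parser.parse_args(['--force', 'some question'])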
| [
"[email protected]"
] | |
b4922344953251b60d5c915c16fcf704c828a4f4 | 74217e968104103957048b4edfb024c8b42edf4d | /hvad/tests/contrib/restframework.py | 7098219d4494b36587a7d8793e6bb0fb4923ae2e | [
"BSD-3-Clause"
] | permissive | trungdq88/django-hvad | 41fcc004419a5f0698791dbd1ce5888f3234b2db | 496aa83553ced45bedbced7b5b364c4436e9c8e2 | refs/heads/master | 2021-01-24T23:51:38.392630 | 2015-12-23T14:20:24 | 2015-12-23T14:20:24 | 48,811,194 | 0 | 0 | null | 2015-12-30T17:10:31 | 2015-12-30T17:10:31 | null | UTF-8 | Python | false | false | 21,664 | py | # -*- coding: utf-8 -*-
from rest_framework.serializers import ModelSerializer, CharField
from hvad.test_utils.context_managers import LanguageOverride
from hvad.test_utils.testcase import HvadTestCase
from hvad.test_utils.project.app.models import Normal
from hvad.test_utils.data import NORMAL
from hvad.test_utils.fixtures import NormalFixture
from hvad.contrib.restframework import (TranslationsMixin,
TranslatableModelSerializer)
from hvad.contrib.restframework.serializers import TranslationListSerializer
#=============================================================================
class AutoSerializer(TranslatableModelSerializer):
class Meta:
model = Normal
class ManualSerializer(TranslatableModelSerializer):
class Meta:
model = Normal
fields = ['shared_field', 'translated_field']
class ExcludeSerializer(TranslatableModelSerializer):
class Meta:
model = Normal
exclude = ['translated_field']
class TranslationsSerializer(TranslationsMixin, ModelSerializer):
class Meta:
model = Normal
class CombinedSerializer(TranslationsMixin, TranslatableModelSerializer):
class Meta:
model = Normal
class CustomTranslationSerializer(ModelSerializer):
# 'cheat' tests that shared fields are accessible to the translation serializer
# It is relevant, it ensures custom serializers see the full object, along with
# any @property. Default serializer will just get to translated fields through
# their accessors on the shared object and work transparently.
cheat = CharField(max_length=250, source='shared_field')
custom = CharField(max_length=250, source='translated_field')
class Meta:
exclude = ('translated_field',)
class CustomSerializer(TranslationsMixin, ModelSerializer):
class Meta:
model = Normal
translations_serializer = CustomTranslationSerializer
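# Rough shape of the representation produced by the TranslationsMixin-based
# serializers above (sketch only; values are hypothetical and the exact layout
# is handled by TranslationListSerializer):
#   {'id': 1, 'shared_field': 'shared',
#    'translations': {'en': {'translated_field': '...'},
#                     'ja': {'translated_field': '...'}}}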
#=============================================================================
class TranslatableModelSerializerTests(HvadTestCase, NormalFixture):
'Checking the serializer representation of objects'
normal_count = 1
#---------------------------------------------------------------------
def test_modelserializer_fields(self):
'Check serializers fields are properly set'
serializer = AutoSerializer()
self.assertCountEqual(serializer.fields,
['id', 'shared_field', 'translated_field', 'language_code'])
serializer = ManualSerializer()
self.assertCountEqual(serializer.fields,
['shared_field', 'translated_field'])
serializer = ExcludeSerializer()
self.assertCountEqual(serializer.fields,
['id', 'shared_field', 'language_code'])
#---------------------------------------------------------------------
def test_serialize_normal(self):
'Serialize translated fields using instance language'
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj)
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translated_field', 'language_code'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertEqual(data['translated_field'], NORMAL[1].translated_field['ja'])
self.assertEqual(data['language_code'], 'ja')
def test_serialize_enforce_wrong(self):
'Serialize translated fields while enforcing a language - wrong translation'
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, language='en')
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translated_field', 'language_code'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertEqual(data['translated_field'], NORMAL[1].translated_field['en'])
self.assertEqual(data['language_code'], 'en')
def test_serialize_enforce_nonexistent(self):
'Serialize translated fields while enforcing a language - nonexistent translation'
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, language='xx')
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translated_field', 'language_code'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertEqual(data['translated_field'], '')
self.assertEqual(data['language_code'], 'xx')
#---------------------------------------------------------------------
def test_create_normal(self):
'Deserialize a new instance'
data = {
'shared_field': 'shared',
'translated_field': 'translated',
'language_code': 'en'
}
serializer = AutoSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertIsNotNone(obj.pk)
self.assertSavedObject(obj, 'en', **data)
def test_create_enforce(self):
'Deserialize a new instance, with language-enforcing mode'
data = {
'shared_field': 'shared',
'translated_field': 'translated',
}
serializer = AutoSerializer(data=data, language='sr')
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertIsNotNone(obj.pk)
self.assertSavedObject(obj, 'sr', **data)
def test_create_enforce_violation(self):
'Deserialize a new instance, with language-enforcing mode and language_code'
data = {
'shared_field': 'shared',
'translated_field': 'translated',
'language_code': 'en',
}
serializer = AutoSerializer(data=data, language='en')
self.assertFalse(serializer.is_valid())
serializer = AutoSerializer(data=data, language='xx')
self.assertFalse(serializer.is_valid())
def test_update_normal_default(self):
'Deserialize an existing instance using instance-loaded language'
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
data = {
'shared_field': 'shared',
'translated_field': 'translated',
}
serializer = AutoSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'ja', **data)
obj = Normal.objects.untranslated().get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
with LanguageOverride('en'):
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'en', **data)
def test_update_normal_language_code(self):
'Deserialize an existing instance using submitted language'
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
data = {
'shared_field': 'shared',
'translated_field': 'translated',
'language_code': 'sr'
}
serializer = AutoSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'sr', **data)
obj = Normal.objects.untranslated().get(pk=self.normal_id[1])
data['translated_field'] = 'translated_bis'
serializer = AutoSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
with LanguageOverride('en'):
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'sr', **data)
def test_update_enforce(self):
'Deserialize an existing instance in language-enforcing mode'
data = {
'shared_field': 'shared',
'translated_field': 'translated',
}
# Correct translation
obj = Normal.objects.language('ja').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, data=data, language='ja')
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'ja', **data)
# Wrong translation
obj = Normal.objects.language('en').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, data=data, language='ja')
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'ja', **data)
# Nonexistent translation
obj = Normal.objects.language('en').get(pk=self.normal_id[1])
serializer = AutoSerializer(instance=obj, data=data, language='sr')
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
self.assertSavedObject(obj, 'sr', **data)
#=============================================================================
class TranslationsMixinTests(HvadTestCase, NormalFixture):
normal_count = 1
def test_translations_mixin_fields(self):
'Check serializers fields are properly set'
serializer = TranslationsSerializer()
self.assertCountEqual(serializer.fields,
['id', 'shared_field', 'translations'])
self.assertIsInstance(serializer.fields['translations'], TranslationListSerializer)
self.assertCountEqual(serializer.fields['translations'].child.fields,
['translated_field'])
serializer = CustomSerializer()
self.assertCountEqual(serializer.fields,
['id', 'shared_field', 'translations'])
self.assertIsInstance(serializer.fields['translations'], TranslationListSerializer)
self.assertIsInstance(serializer.fields['translations'].child, CustomTranslationSerializer)
self.assertCountEqual(serializer.fields['translations'].child.fields, ['cheat', 'custom'])
#---------------------------------------------------------------------
def test_serialize(self):
'Serialize nested translations as a language => fields dict'
obj = Normal.objects.prefetch_related('translations').get(pk=self.normal_id[1])
serializer = TranslationsSerializer(instance=obj)
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translations'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertIsInstance(data['translations'], dict)
self.assertCountEqual(data['translations'], self.translations)
for language in self.translations:
translation = data['translations'][language]
self.assertCountEqual(translation, ['translated_field'])
self.assertEqual(translation['translated_field'], NORMAL[1].translated_field[language])
def test_serialize_custom(self):
'Serialize nested translations as a language => fields dict'
obj = Normal.objects.prefetch_related('translations').get(pk=self.normal_id[1])
serializer = CustomSerializer(instance=obj)
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translations'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertIsInstance(data['translations'], dict)
self.assertCountEqual(data['translations'], self.translations)
for language in self.translations:
translation = data['translations'][language]
self.assertCountEqual(translation, ['cheat', 'custom'])
self.assertEqual(translation['cheat'], NORMAL[1].shared_field)
self.assertEqual(translation['custom'], NORMAL[1].translated_field[language])
#---------------------------------------------------------------------
def test_invalid(self):
'Submit invalid data'
# No translations
data = {
'shared_field': 'shared',
'translations': {},
}
serializer = TranslationsSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertTrue(serializer.errors['translations'])
# Invalid translations type
data = {
'shared_field': 'shared',
'translations': [
{ 'translated_field': 'English', },
],
}
serializer = TranslationsSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertTrue(serializer.errors['translations'])
# Cascade invalid child
data = {
'shared_field': 'shared',
'translations': {
'en': { 'translated_field': 'x'*999 },
},
}
serializer = TranslationsSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertTrue(serializer.errors['translations'])
self.assertTrue(serializer.errors['translations']['en'])
self.assertTrue(serializer.errors['translations']['en']['translated_field'])
#---------------------------------------------------------------------
def test_create(self):
'Create a new Normal instance, with two translations'
data = {
'shared_field': 'shared',
'translations': {
'en': { 'translated_field': 'English', },
'sr': { 'translated_field': u'српски', },
},
}
serializer = TranslationsSerializer(data=data)
self.assertTrue(serializer.is_valid())
with self.assertNumQueries(3): # insert shared, insert "en", insert "sr"
obj = serializer.save()
self.assertIsNot(obj.pk, None)
qs = Normal.objects.language('all').filter(pk=obj.pk)
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('en', 'English'), ('sr', u'српски')])
def test_update(self):
'Update an existing normal instance: 1 new, 1 updated, 1 deleted translations'
obj = Normal.objects.untranslated().prefetch_related('translations').get(pk=self.normal_id[1])
data = {
'shared_field': 'shared',
'translations': {
'en': { 'translated_field': 'English', }, # should be updated
'sr': { 'translated_field': u'српски', }, # should create
}, # Japanese should be deleted
}
serializer = TranslationsSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
with self.assertNumQueries(4): # update shared, update "en", insert "sr", delete others
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
qs = Normal.objects.language('all').filter(pk=self.normal_id[1])
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('en', 'English'), ('sr', u'српски')])
def test_update_partial(self):
'Update an existing instance, but just some fields'
obj = Normal.objects.untranslated().get(pk=self.normal_id[1])
data = {
'shared_field': 'shared'
}
serializer = TranslationsSerializer(instance=obj, data=data, partial=True)
self.assertTrue(serializer.is_valid())
with self.assertNumQueries(1): # update shared
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
qs = Normal.objects.language('all').filter(pk=self.normal_id[1], shared_field='shared')
self.assertCountEqual([obj.language_code for obj in qs], self.translations)
#=============================================================================
class CombinedTests(HvadTestCase, NormalFixture):
normal_count = 1
def test_combined_fields(self):
'Check serializers fields are properly set'
serializer = CombinedSerializer()
self.assertCountEqual(serializer.fields,
['id', 'shared_field', 'translated_field', 'language_code', 'translations'])
self.assertIsInstance(serializer.fields['translations'], TranslationListSerializer)
self.assertCountEqual(serializer.fields['translations'].child.fields,
['translated_field'])
#---------------------------------------------------------------------
def test_serialize(self):
'Serialize translations as a language => fields dict + naive fields'
obj = Normal.objects.language('ja').prefetch_related('translations').get(pk=self.normal_id[1])
serializer = CombinedSerializer(instance=obj)
data = serializer.data
self.assertCountEqual(data, ['id', 'shared_field', 'translated_field', 'language_code', 'translations'])
self.assertEqual(data['id'], self.normal_id[1])
self.assertEqual(data['shared_field'], NORMAL[1].shared_field)
self.assertEqual(data['translated_field'], NORMAL[1].translated_field['ja'])
self.assertEqual(data['language_code'], 'ja')
self.assertIsInstance(data['translations'], dict)
self.assertCountEqual(data['translations'], self.translations)
for language in self.translations:
translation = data['translations'][language]
self.assertCountEqual(translation, ['translated_field'])
self.assertEqual(translation['translated_field'], NORMAL[1].translated_field[language])
#---------------------------------------------------------------------
def test_create_translations(self):
'Create a new Normal instance, with two translations'
data = {
'shared_field': 'shared',
'translated_field': 'should be ignored',
'language_code': 'sr',
'translations': {
'en': { 'translated_field': 'English', },
'sr': { 'translated_field': u'српски', },
},
}
serializer = CombinedSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertIsNot(obj.pk, None)
qs = Normal.objects.language('all').filter(pk=obj.pk)
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('en', 'English'), ('sr', u'српски')])
def test_create_translatable(self):
'Create a new Normal instance, in translatablemodelserializer style'
data = {
'shared_field': 'shared',
'translated_field': u'српски',
'language_code': 'sr'
}
serializer = CombinedSerializer(data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertIsNot(obj.pk, None)
qs = Normal.objects.language('all').filter(pk=obj.pk)
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('sr', u'српски')])
def test_update_translations(self):
'Update an existing normal instance: 1 new, 1 updated, 1 deleted translations'
obj = Normal.objects.untranslated().get(pk=self.normal_id[1])
data = {
'shared_field': 'shared',
'language_code': 'ignored',
'translations': {
'en': { 'translated_field': 'English', }, # should be updated
'sr': { 'translated_field': u'српски', }, # should create
}, # Japanese should be deleted
}
serializer = CombinedSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
qs = Normal.objects.language('all').filter(pk=self.normal_id[1])
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('en', 'English'), ('sr', u'српски')])
def test_update_translatable(self):
'Update an existing normal instance translation in translatablemodel mode'
obj = Normal.objects.untranslated().get(pk=self.normal_id[1])
data = {
'shared_field': 'shared',
'translated_field': u'српски',
'language_code': 'sr'
}
serializer = CombinedSerializer(instance=obj, data=data)
self.assertTrue(serializer.is_valid())
obj = serializer.save()
self.assertEqual(obj.pk, self.normal_id[1])
qs = Normal.objects.language('all').filter(pk=self.normal_id[1])
self.assertCountEqual([(obj.language_code, obj.translated_field) for obj in qs],
[('en', NORMAL[1].translated_field['en']),
('ja', NORMAL[1].translated_field['ja']),
('sr', u'српски')])
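# Note: these cases rely on the hvad test project settings (the fixtures, models
# and LanguageOverride helper imported above); they are meant to be collected by
# the package's own test runner rather than executed as a standalone script.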
| [
"[email protected]"
] | |
6a0aec763b5e253145873cd3bed3a39e26344b7f | 016b7b0cdd60900ca9b2e26f959142c30313e00d | /report/views.py | 1c7bc22d70a1ad296f8eed19d0e4747783cedc6b | [] | no_license | groob/imagr_server | 1e2abdab290b020225359103e72f56ecec7d52b5 | 81dfa968ed48ec719803dd0d53f17b92130e76da | refs/heads/master | 2020-04-05T23:06:40.972867 | 2015-06-11T14:46:15 | 2015-06-11T14:46:15 | 51,090,790 | 0 | 0 | null | 2016-02-04T16:58:34 | 2016-02-04T16:58:34 | null | UTF-8 | Python | false | false | 773 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from models import *
# Create your views here.
@csrf_exempt
def index(request):
data = request.POST
serial = data['serial']
message = data['message']
status = data['status']
# see if the computer exists
if serial:
try:
computer = Computer.objects.get(serial_number=serial)
except Computer.DoesNotExist:
computer = Computer(serial_number=serial)
computer.current_status = status
computer.save()
# create a new report object
report = Report(computer=computer, message=message, status=status)
report.save()
return HttpResponse(data)
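# Illustrative form-encoded body this view expects from the imagr client
# (field values are made up; 'serial', 'message' and 'status' are the keys read above):
#   serial=C02ABC123XYZ&status=success&message=workflow finished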
| [
"[email protected]"
] | |
beefc26ee5cc6b2af147350338002391621f0297 | 80e6e31054fe9105d2c26be7aac53c4cd6a4a33f | /scripts/kettle/oracle2hive.py | 532ed84e5b965fec9f9c21de191d5f8bc008386d | [] | no_license | alionishere/learn_python | 8a7f6dc7d754a357d4cb720f4bc0d5c3e6e5e895 | 832b8e0579da0b7ab37e815be10204f8de1ad22d | refs/heads/master | 2021-06-24T11:02:05.111027 | 2021-06-23T08:47:06 | 2021-06-23T08:47:06 | 223,834,194 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,666 | py | # -*- coding: utf-8 -*-
import json
import dbutils
get_ora_meta_sql = '''
SELECT t1.OWNER
,t1.TABLE_NAME
,t1.COLUMN_NAME
,t1.DATA_TYPE
,t1.DATA_LENGTH
,t1.DATA_PRECISION
,t1.DATA_SCALE
,t2.COMMENTS
FROM DBA_TAB_COLUMNS t1
LEFT JOIN DBA_COL_COMMENTS t2
ON t1.OWNER = t2.OWNER
AND t1.TABLE_NAME = t2.TABLE_NAME
AND t1.COLUMN_NAME = t2.COLUMN_NAME
WHERE t1.OWNER = '%s'
AND t1.TABLE_NAME = '%s'
ORDER BY COLUMN_ID
'''
get_mysql_meta_sql = '''
SELECT TABLE_SCHEMA
,TABLE_NAME
,COLUMN_NAME
,ORDINAL_POSITION
,DATA_TYPE
,CHARACTER_MAXIMUM_LENGTH
,CHARACTER_OCTET_LENGTH
,NUMERIC_PRECISION
,NUMERIC_SCALE
,COLUMN_TYPE
,COLUMN_COMMENT
FROM INFORMATION_SCHEMA.COLUMNS
WHERE TABLE_SCHEMA = '%s'
AND TABLE_NAME = '%s'
;
'''
def get_ora_meta(conn, sql, src_schema, src_tb, hive_schema='', hive_tb=''):
fields = []
field_attrs = []
cur = conn.cursor()
sql = sql % (src_schema.upper(), src_tb.upper())
print(sql)
print('--' * 30)
cur.execute(sql)
res = cur.fetchall()
for field in res:
if field[3] == 'CLOB' or field[3] == 'DATE':
field_attr = field[2] + ' STRING ' + 'COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'VARCHAR2' or field[3] == 'VARCHAR' or field[3] == 'CHAR':
field_attr = field[2] + ' VARCHAR(' + str(field[4]) + ') COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
elif field[3] == 'NUMBER':
field_attr = ''
if field[6] == 0:
field_attr = field[2] + ' BIGINT ' + 'COMMENT \'' + str(field[7]) + '\''
elif field[5] is not None and field[6] is not None:
field_attr = field[2] + ' DECIMAL(' + str(field[5]) + ',' + str(field[6]) + ') COMMENT \'' + str(field[7]) + '\''
else:
field_attr = field[2] + ' DECIMAL(23,4)' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
else:
field_attr = field[2] + ' STRING ' + ' COMMENT \'' + str(field[7]) + '\''
field_attrs.append(field_attr)
# print(field)
fields.append(field[2])
# break
cur.close()
fields = ','.join(fields)
field_attrs = ',\n'.join(field_attrs)
# print(field_attrs)
create_str = '''
CREATE TABLE %s.%s (\n%s\n)
PARTITIONED BY (TX_DATE STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001'
STORED AS PARQUET
LOCATION '/DWZQ/%s/%s';
'''
hive_tb = '%s_%s' % (src_schema, src_tb)
hive_tb_temp = '%s_TEMP' % hive_tb
create_stmt = create_str % (hive_schema.upper(), hive_tb.upper(), field_attrs, hive_schema.upper(), hive_tb.upper())
create_stmt_temp = create_str % (hive_schema.upper(), hive_tb_temp.upper(), field_attrs, hive_schema.upper(), hive_tb_temp.upper())
print(create_stmt)
print(create_stmt_temp)
return create_stmt
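# Shape of the DDL produced above (schema, table and column names are illustrative):
#   CREATE TABLE HIVE_SCHEMA.SCOTT_EMP (
#   EMPNO BIGINT COMMENT 'employee number',
#   ENAME VARCHAR(10) COMMENT 'employee name'
#   )
#   PARTITIONED BY (TX_DATE STRING)
#   ...
#   LOCATION '/DWZQ/HIVE_SCHEMA/SCOTT_EMP';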
def get_mysql_meta(conn, sql, src_schema, src_tb, hive_schema='', hive_tb=''):
# NOTE: the MySQL query returns different columns than the Oracle one:
# 0 TABLE_SCHEMA, 1 TABLE_NAME, 2 COLUMN_NAME, 3 ORDINAL_POSITION, 4 DATA_TYPE,
# 5 CHARACTER_MAXIMUM_LENGTH, 6 CHARACTER_OCTET_LENGTH, 7 NUMERIC_PRECISION,
# 8 NUMERIC_SCALE, 9 COLUMN_TYPE, 10 COLUMN_COMMENT
fields = []
field_attrs = []
cur = conn.cursor()
sql = sql % (src_schema.upper(), src_tb.upper())
print(sql)
print('--' * 30)
cur.execute(sql)
res = cur.fetchall()
for field in res:
col_name = field[2]
data_type = str(field[4]).lower()
comment = str(field[10])
# mirror the Oracle-to-Hive mapping above: character types keep their length,
# exact numerics become BIGINT/DECIMAL, everything else falls back to STRING
if data_type in ('varchar', 'char'):
field_attr = col_name + ' VARCHAR(' + str(field[5]) + ') COMMENT \'' + comment + '\''
elif data_type in ('tinyint', 'smallint', 'mediumint', 'int', 'bigint'):
field_attr = col_name + ' BIGINT COMMENT \'' + comment + '\''
elif data_type in ('decimal', 'numeric'):
if field[8] == 0:
field_attr = col_name + ' BIGINT COMMENT \'' + comment + '\''
elif field[7] is not None and field[8] is not None:
field_attr = col_name + ' DECIMAL(' + str(field[7]) + ',' + str(field[8]) + ') COMMENT \'' + comment + '\''
else:
field_attr = col_name + ' DECIMAL(23,4) COMMENT \'' + comment + '\''
else:
# text, blob, date, datetime, timestamp, etc.
field_attr = col_name + ' STRING COMMENT \'' + comment + '\''
field_attrs.append(field_attr)
fields.append(col_name)
cur.close()
fields = ','.join(fields)
field_attrs = ',\n'.join(field_attrs)
# print(field_attrs)
create_str = '''
CREATE TABLE %s.%s (\n%s\n)
PARTITIONED BY (TX_DATE STRING)
ROW FORMAT DELIMITED FIELDS TERMINATED BY '\u0001'
STORED AS PARQUET
LOCATION '/DWZQ/%s/%s';
'''
hive_tb = '%s_%s' % (src_schema, src_tb)
hive_tb_temp = '%s_TEMP' % hive_tb
create_stmt = create_str % (hive_schema.upper(), hive_tb.upper(), field_attrs, hive_schema.upper(), hive_tb.upper())
create_stmt_temp = create_str % (hive_schema.upper(), hive_tb_temp.upper(), field_attrs, hive_schema.upper(), hive_tb_temp.upper())
print(create_stmt)
print(create_stmt_temp)
return create_stmt
def run(tb_info_details):
for tb_info in tb_info_details:
conn = dbutils.get_conn(tb_info['data_src'].lower())
src_owner = tb_info['src_tb'].split('.')[0]
src_tb = tb_info['src_tb'].split('.')[1]
hive_schema = tb_info['data_src']
# hive_tb =
get_ora_meta(conn, get_ora_meta_sql, src_owner, src_tb, hive_schema)
if __name__ == '__main__':
with open('cfg.json', 'r') as f:
tb_info_details = json.load(f)
run(tb_info_details)
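# cfg.json is expected to hold a JSON list of table entries; minimal sketch
# (values are illustrative - only 'data_src' and 'src_tb' are read in run()):
# [
#   {"data_src": "ORADW", "src_tb": "SCOTT.EMP"},
#   {"data_src": "ORADW", "src_tb": "SCOTT.DEPT"}
# ]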
| [
"[email protected]"
] | |
ed264a15f7a93c1ffc3c24393851337420b1c8c5 | 5f67c696967456c063e5f8a0d14cf18cf845ad38 | /archiv/_python/py4inf/gmane/gyear.py | 30e892a7e7d666c4991703bf713d123ac276373c | [] | no_license | wuxi20/Pythonista | 3f2abf8c40fd6554a4d7596982c510e6ba3d6d38 | acf12d264615749f605a0a6b6ea7ab72442e049c | refs/heads/master | 2020-04-02T01:17:39.264328 | 2019-04-16T18:26:59 | 2019-04-16T18:26:59 | 153,848,116 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,705 | py | import sqlite3
import time
import urllib.request, urllib.parse, urllib.error
import zlib
conn = sqlite3.connect('index.sqlite')
conn.text_factory = str
cur = conn.cursor()
# Determine the top ten organizations
cur.execute('''SELECT Messages.id, sender FROM Messages
JOIN Senders ON Messages.sender_id = Senders.id''')
sendorgs = dict()
for message_row in cur :
sender = message_row[1]
pieces = sender.split("@")
if len(pieces) != 2 : continue
dns = pieces[1]
sendorgs[dns] = sendorgs.get(dns,0) + 1
# keep the top organizations
orgs = sorted(sendorgs, key=sendorgs.get, reverse=True)
orgs = orgs[:10]
print("Top 10 Organizations")
print(orgs)
# orgs = ['total'] + orgs
# Read through the messages
counts = dict()
years = list()
cur.execute('''SELECT Messages.id, sender, sent_at FROM Messages
JOIN Senders ON Messages.sender_id = Senders.id''')
for message_row in cur :
sender = message_row[1]
pieces = sender.split("@")
if len(pieces) != 2 : continue
dns = pieces[1]
if dns not in orgs : continue
year = message_row[2][:4]
if year not in years : years.append(year)
key = (year, dns)
counts[key] = counts.get(key,0) + 1
tkey = (year, 'total')
counts[tkey] = counts.get(tkey,0) + 1
years.sort()
print(counts)
print(years)
fhand = open('gline.js','w')
fhand.write("gline = [ ['Year'")
for org in orgs:
fhand.write(",'"+org+"'")
fhand.write("]")
# for year in years[1:-1]:
for year in years:
fhand.write(",\n['"+year+"'")
for org in orgs:
key = (year, org)
val = counts.get(key,0)
fhand.write(","+str(val))
fhand.write("]");
fhand.write("\n];\n")
print("Output written to gline.js")
| [
"[email protected]"
] | |
0e688ee0061c5df180a71f4e16541de90c10d0b4 | 39bc099123097e1a183b44437954a3f037125891 | /tests/ext/modeling.py | b4844cc12cbbd9efb8749f282a606990ae3eb1a8 | [
"MIT"
] | permissive | pranaya-mathur/bert-for-tf2 | 81c5f1db765ca6e06651e284f0911ba9099c6f99 | cad915ee9d20802a05181373fe30b716a70bc870 | refs/heads/master | 2020-09-05T18:00:24.987757 | 2019-11-05T16:19:44 | 2019-11-05T16:19:44 | 220,175,286 | 1 | 0 | MIT | 2019-11-07T07:19:26 | 2019-11-07T07:19:25 | null | UTF-8 | Python | false | false | 38,936 | py | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The main BERT model and related functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import json
import math
import re
import numpy as np
import six
import tensorflow as tf
class BertConfig(object):
"""Configuration for `BertModel`."""
def __init__(self,
vocab_size,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
initializer_range=0.02):
"""Constructs BertConfig.
Args:
vocab_size: Vocabulary size of `inputs_ids` in `BertModel`.
hidden_size: Size of the encoder layers and the pooler layer.
num_hidden_layers: Number of hidden layers in the Transformer encoder.
num_attention_heads: Number of attention heads for each attention layer in
the Transformer encoder.
intermediate_size: The size of the "intermediate" (i.e., feed-forward)
layer in the Transformer encoder.
hidden_act: The non-linear activation function (function or string) in the
encoder and pooler.
hidden_dropout_prob: The dropout probability for all fully connected
layers in the embeddings, encoder, and pooler.
attention_probs_dropout_prob: The dropout ratio for the attention
probabilities.
max_position_embeddings: The maximum sequence length that this model might
ever be used with. Typically set this to something large just in case
(e.g., 512 or 1024 or 2048).
type_vocab_size: The vocabulary size of the `token_type_ids` passed into
`BertModel`.
initializer_range: The stdev of the truncated_normal_initializer for
initializing all weight matrices.
"""
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.hidden_act = hidden_act
self.intermediate_size = intermediate_size
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.initializer_range = initializer_range
@classmethod
def from_dict(cls, json_object):
"""Constructs a `BertConfig` from a Python dictionary of parameters."""
config = BertConfig(vocab_size=None)
for (key, value) in six.iteritems(json_object):
config.__dict__[key] = value
return config
@classmethod
def from_json_file(cls, json_file):
"""Constructs a `BertConfig` from a json file of parameters."""
with tf.io.gfile.GFile(json_file, "r") as reader:
text = reader.read()
return cls.from_dict(json.loads(text))
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
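# Typical round-trip for BertConfig (sketch; the checkpoint directory is illustrative):
#   config = BertConfig.from_json_file("uncased_L-12_H-768_A-12/bert_config.json")
#   print(config.to_json_string())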
class BertModel(object):
"""BERT model ("Bidirectional Encoder Representations from Transformers").
Example usage:
```python
# Already been converted into WordPiece token ids
input_ids = tf.constant([[31, 51, 99], [15, 5, 0]])
input_mask = tf.constant([[1, 1, 1], [1, 1, 0]])
token_type_ids = tf.constant([[0, 0, 1], [0, 2, 0]])
config = modeling.BertConfig(vocab_size=32000, hidden_size=512,
num_hidden_layers=8, num_attention_heads=8, intermediate_size=1024)
model = modeling.BertModel(config=config, is_training=True,
input_ids=input_ids, input_mask=input_mask, token_type_ids=token_type_ids)
label_embeddings = tf.get_variable(...)
pooled_output = model.get_pooled_output()
logits = tf.matmul(pooled_output, label_embeddings)
...
```
"""
def __init__(self,
config,
is_training,
input_ids,
input_mask=None,
token_type_ids=None,
use_one_hot_embeddings=False,
scope=None):
"""Constructor for BertModel.
Args:
config: `BertConfig` instance.
is_training: bool. true for training model, false for eval model. Controls
whether dropout will be applied.
input_ids: int32 Tensor of shape [batch_size, seq_length].
input_mask: (optional) int32 Tensor of shape [batch_size, seq_length].
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
use_one_hot_embeddings: (optional) bool. Whether to use one-hot word
embeddings or tf.embedding_lookup() for the word embeddings.
scope: (optional) variable scope. Defaults to "bert".
Raises:
ValueError: The config is invalid or one of the input tensor shapes
is invalid.
"""
config = copy.deepcopy(config)
if not is_training:
config.hidden_dropout_prob = 0.0
config.attention_probs_dropout_prob = 0.0
input_shape = get_shape_list(input_ids, expected_rank=2)
batch_size = input_shape[0]
seq_length = input_shape[1]
if input_mask is None:
input_mask = tf.ones(shape=[batch_size, seq_length], dtype=tf.int32)
if token_type_ids is None:
token_type_ids = tf.zeros(shape=[batch_size, seq_length], dtype=tf.int32)
with tf.compat.v1.variable_scope(scope, default_name="bert"):
with tf.compat.v1.variable_scope("embeddings"):
# Perform embedding lookup on the word ids.
(self.embedding_output, self.embedding_table) = embedding_lookup(
input_ids=input_ids,
vocab_size=config.vocab_size,
embedding_size=config.hidden_size,
initializer_range=config.initializer_range,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=use_one_hot_embeddings)
# Add positional embeddings and token type embeddings, then layer
# normalize and perform dropout.
self.embedding_output = embedding_postprocessor(
input_tensor=self.embedding_output,
use_token_type=True,
token_type_ids=token_type_ids,
token_type_vocab_size=config.type_vocab_size,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=config.initializer_range,
max_position_embeddings=config.max_position_embeddings,
dropout_prob=config.hidden_dropout_prob)
with tf.compat.v1.variable_scope("encoder"):
# This converts a 2D mask of shape [batch_size, seq_length] to a 3D
# mask of shape [batch_size, seq_length, seq_length] which is used
# for the attention scores.
attention_mask = create_attention_mask_from_input_mask(
input_ids, input_mask)
# Run the stacked transformer.
# `sequence_output` shape = [batch_size, seq_length, hidden_size].
self.all_encoder_layers = transformer_model(
input_tensor=self.embedding_output,
attention_mask=attention_mask,
hidden_size=config.hidden_size,
num_hidden_layers=config.num_hidden_layers,
num_attention_heads=config.num_attention_heads,
intermediate_size=config.intermediate_size,
intermediate_act_fn=get_activation(config.hidden_act),
hidden_dropout_prob=config.hidden_dropout_prob,
attention_probs_dropout_prob=config.attention_probs_dropout_prob,
initializer_range=config.initializer_range,
do_return_all_layers=True)
self.sequence_output = self.all_encoder_layers[-1]
# The "pooler" converts the encoded sequence tensor of shape
# [batch_size, seq_length, hidden_size] to a tensor of shape
# [batch_size, hidden_size]. This is necessary for segment-level
# (or segment-pair-level) classification tasks where we need a fixed
# dimensional representation of the segment.
with tf.compat.v1.variable_scope("pooler"):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token. We assume that this has been pre-trained
first_token_tensor = tf.squeeze(self.sequence_output[:, 0:1, :], axis=1)
self.pooled_output = tf.compat.v1.layers.dense(
first_token_tensor,
config.hidden_size,
activation=tf.tanh,
kernel_initializer=create_initializer(config.initializer_range))
def get_pooled_output(self):
return self.pooled_output
def get_sequence_output(self):
"""Gets final hidden layer of encoder.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the final hidden of the transformer encoder.
"""
return self.sequence_output
def get_all_encoder_layers(self):
return self.all_encoder_layers
def get_embedding_output(self):
"""Gets output of the embedding lookup (i.e., input to the transformer).
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size] corresponding
to the output of the embedding layer, after summing the word
embeddings with the positional embeddings and the token type embeddings,
then performing layer normalization. This is the input to the transformer.
"""
return self.embedding_output
def get_embedding_table(self):
return self.embedding_table
def gelu(x):
"""Gaussian Error Linear Unit.
This is a smoother version of the RELU.
Original paper: https://arxiv.org/abs/1606.08415
Args:
x: float Tensor to perform activation.
Returns:
`x` with the GELU activation applied.
"""
cdf = 0.5 * (1.0 + tf.tanh(
(np.sqrt(2 / np.pi) * (x + 0.044715 * tf.pow(x, 3)))))
return x * cdf
def get_activation(activation_string):
"""Maps a string to a Python function, e.g., "relu" => `tf.nn.relu`.
Args:
activation_string: String name of the activation function.
Returns:
A Python function corresponding to the activation function. If
`activation_string` is None, empty, or "linear", this will return None.
If `activation_string` is not a string, it will return `activation_string`.
Raises:
ValueError: The `activation_string` does not correspond to a known
activation.
"""
# We assume that anything that"s not a string is already an activation
# function, so we just return it.
if not isinstance(activation_string, six.string_types):
return activation_string
if not activation_string:
return None
act = activation_string.lower()
if act == "linear":
return None
elif act == "relu":
return tf.nn.relu
elif act == "gelu":
return gelu
elif act == "tanh":
return tf.tanh
else:
raise ValueError("Unsupported activation: %s" % act)
def get_assignment_map_from_checkpoint(tvars, init_checkpoint):
"""Compute the union of the current variables and checkpoint variables."""
assignment_map = {}
initialized_variable_names = {}
name_to_variable = collections.OrderedDict()
for var in tvars:
name = var.name
m = re.match("^(.*):\\d+$", name)
if m is not None:
name = m.group(1)
name_to_variable[name] = var
init_vars = tf.train.list_variables(init_checkpoint)
assignment_map = collections.OrderedDict()
for x in init_vars:
(name, var) = (x[0], x[1])
if name not in name_to_variable:
continue
assignment_map[name] = name
initialized_variable_names[name] = 1
initialized_variable_names[name + ":0"] = 1
return (assignment_map, initialized_variable_names)
def dropout(input_tensor, dropout_prob):
"""Perform dropout.
Args:
input_tensor: float Tensor.
dropout_prob: Python float. The probability of dropping out a value (NOT of
*keeping* a dimension as in `tf.nn.dropout`).
Returns:
A version of `input_tensor` with dropout applied.
"""
if dropout_prob is None or dropout_prob == 0.0:
return input_tensor
output = tf.nn.dropout(input_tensor, rate=dropout_prob)
return output
def layer_norm(input_tensor, name=None):
"""Run layer normalization on the last dimension of the tensor."""
# return tf.contrib.layers.layer_norm(
# inputs=input_tensor, begin_norm_axis=-1, begin_params_axis=-1, scope=name)
epsilon = 1e-12
input_shape = input_tensor.shape
with tf.compat.v1.variable_scope("LayerNorm"):
gamma = tf.compat.v1.get_variable(name="gamma", shape=input_shape[-1:], initializer=tf.compat.v1.initializers.ones(), trainable=True)
beta = tf.compat.v1.get_variable(name="beta", shape=input_shape[-1:], initializer=tf.compat.v1.initializers.zeros(), trainable=True)
x = input_tensor
if tf.__version__.startswith("2."):
mean, var = tf.nn.moments(x=x, axes=-1, keepdims=True)
else:
mean, var = tf.nn.moments(x, axes=-1, keep_dims=True)
inv = gamma * tf.math.rsqrt(var + epsilon)
res = x * tf.cast(inv, x.dtype) + tf.cast(beta - mean * inv, x.dtype)
return res
def layer_norm_and_dropout(input_tensor, dropout_prob, name=None):
"""Runs layer normalization followed by dropout."""
output_tensor = layer_norm(input_tensor, name)
output_tensor = dropout(output_tensor, dropout_prob)
return output_tensor
def create_initializer(initializer_range=0.02):
"""Creates a `truncated_normal_initializer` with the given range."""
return tf.compat.v1.initializers.truncated_normal(stddev=initializer_range)
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings",
use_one_hot_embeddings=False):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
use_one_hot_embeddings: bool. If True, use one-hot method for word
embeddings. If False, use `tf.gather()`.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.compat.v1.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
flat_input_ids = tf.reshape(input_ids, [-1])
if use_one_hot_embeddings:
one_hot_input_ids = tf.one_hot(flat_input_ids, depth=vocab_size)
output = tf.matmul(one_hot_input_ids, embedding_table)
else:
output = tf.gather(embedding_table, flat_input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return (output, embedding_table)
def embedding_postprocessor(input_tensor,
use_token_type=False,
token_type_ids=None,
token_type_vocab_size=16,
token_type_embedding_name="token_type_embeddings",
use_position_embeddings=True,
position_embedding_name="position_embeddings",
initializer_range=0.02,
max_position_embeddings=512,
dropout_prob=0.1):
"""Performs various post-processing on a word embedding tensor.
Args:
input_tensor: float Tensor of shape [batch_size, seq_length,
embedding_size].
use_token_type: bool. Whether to add embeddings for `token_type_ids`.
token_type_ids: (optional) int32 Tensor of shape [batch_size, seq_length].
Must be specified if `use_token_type` is True.
token_type_vocab_size: int. The vocabulary size of `token_type_ids`.
token_type_embedding_name: string. The name of the embedding table variable
for token type ids.
use_position_embeddings: bool. Whether to add position embeddings for the
position of each token in the sequence.
position_embedding_name: string. The name of the embedding table variable
for positional embeddings.
initializer_range: float. Range of the weight initialization.
max_position_embeddings: int. Maximum sequence length that might ever be
used with this model. This can be longer than the sequence length of
input_tensor, but cannot be shorter.
dropout_prob: float. Dropout probability applied to the final output tensor.
Returns:
float tensor with same shape as `input_tensor`.
Raises:
ValueError: One of the tensor shapes or input values is invalid.
"""
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
width = input_shape[2]
output = input_tensor
if use_token_type:
if token_type_ids is None:
raise ValueError("`token_type_ids` must be specified if"
"`use_token_type` is True.")
token_type_table = tf.compat.v1.get_variable(
name=token_type_embedding_name,
shape=[token_type_vocab_size, width],
initializer=create_initializer(initializer_range), use_resource=False)
# This vocab will be small so we always do one-hot here, since it is always
# faster for a small vocabulary.
flat_token_type_ids = tf.reshape(token_type_ids, [-1])
one_hot_ids = tf.one_hot(flat_token_type_ids, depth=token_type_vocab_size)
token_type_embeddings = tf.matmul(one_hot_ids, token_type_table)
token_type_embeddings = tf.reshape(token_type_embeddings,
[batch_size, seq_length, width])
output += token_type_embeddings
if use_position_embeddings:
assert_op = tf.compat.v1.assert_less_equal(seq_length, max_position_embeddings)
with tf.control_dependencies([assert_op]):
full_position_embeddings = tf.compat.v1.get_variable(
name=position_embedding_name,
shape=[max_position_embeddings, width],
initializer=create_initializer(initializer_range), use_resource=False)
# Since the position embedding table is a learned variable, we create it
# using a (long) sequence length `max_position_embeddings`. The actual
# sequence length might be shorter than this, for faster training of
# tasks that do not have long sequences.
#
# So `full_position_embeddings` is effectively an embedding table
# for position [0, 1, 2, ..., max_position_embeddings-1], and the current
# sequence has positions [0, 1, 2, ... seq_length-1], so we can just
# perform a slice.
position_embeddings = tf.slice(full_position_embeddings, [0, 0],
[seq_length, -1])
num_dims = len(output.shape.as_list())
# Only the last two dimensions are relevant (`seq_length` and `width`), so
# we broadcast among the first dimensions, which is typically just
# the batch size.
position_broadcast_shape = []
for _ in range(num_dims - 2):
position_broadcast_shape.append(1)
position_broadcast_shape.extend([seq_length, width])
position_embeddings = tf.reshape(position_embeddings,
position_broadcast_shape)
output += position_embeddings
output = layer_norm_and_dropout(output, dropout_prob)
return output
def create_attention_mask_from_input_mask(from_tensor, to_mask):
"""Create 3D attention mask from a 2D tensor mask.
Args:
from_tensor: 2D or 3D Tensor of shape [batch_size, from_seq_length, ...].
to_mask: int32 Tensor of shape [batch_size, to_seq_length].
Returns:
float Tensor of shape [batch_size, from_seq_length, to_seq_length].
"""
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_shape = get_shape_list(to_mask, expected_rank=2)
to_seq_length = to_shape[1]
to_mask = tf.cast(
tf.reshape(to_mask, [batch_size, 1, to_seq_length]), tf.float32)
# We don't assume that `from_tensor` is a mask (although it could be). We
# don't actually care if we attend *from* padding tokens (only *to* padding)
# tokens so we create a tensor of all ones.
#
# `broadcast_ones` = [batch_size, from_seq_length, 1]
broadcast_ones = tf.ones(
shape=[batch_size, from_seq_length, 1], dtype=tf.float32)
# Here we broadcast along two dimensions to create the mask.
mask = broadcast_ones * to_mask
return mask
def attention_layer(from_tensor,
to_tensor,
attention_mask=None,
num_attention_heads=1,
size_per_head=512,
query_act=None,
key_act=None,
value_act=None,
attention_probs_dropout_prob=0.0,
initializer_range=0.02,
do_return_2d_tensor=False,
batch_size=None,
from_seq_length=None,
to_seq_length=None):
"""Performs multi-headed attention from `from_tensor` to `to_tensor`.
This is an implementation of multi-headed attention based on "Attention
is all you Need". If `from_tensor` and `to_tensor` are the same, then
this is self-attention. Each timestep in `from_tensor` attends to the
corresponding sequence in `to_tensor`, and returns a fixed-width vector.
This function first projects `from_tensor` into a "query" tensor and
`to_tensor` into "key" and "value" tensors. These are (effectively) a list
of tensors of length `num_attention_heads`, where each tensor is of shape
[batch_size, seq_length, size_per_head].
Then, the query and key tensors are dot-producted and scaled. These are
softmaxed to obtain attention probabilities. The value tensors are then
interpolated by these probabilities, then concatenated back to a single
tensor and returned.
In practice, the multi-headed attention is done with transposes and
reshapes rather than actual separate tensors.
Args:
from_tensor: float Tensor of shape [batch_size, from_seq_length,
from_width].
to_tensor: float Tensor of shape [batch_size, to_seq_length, to_width].
attention_mask: (optional) int32 Tensor of shape [batch_size,
from_seq_length, to_seq_length]. The values should be 1 or 0. The
attention scores will effectively be set to -infinity for any positions in
the mask that are 0, and will be unchanged for positions that are 1.
num_attention_heads: int. Number of attention heads.
size_per_head: int. Size of each attention head.
query_act: (optional) Activation function for the query transform.
key_act: (optional) Activation function for the key transform.
value_act: (optional) Activation function for the value transform.
attention_probs_dropout_prob: (optional) float. Dropout probability of the
attention probabilities.
initializer_range: float. Range of the weight initializer.
do_return_2d_tensor: bool. If True, the output will be of shape [batch_size
* from_seq_length, num_attention_heads * size_per_head]. If False, the
output will be of shape [batch_size, from_seq_length, num_attention_heads
* size_per_head].
batch_size: (Optional) int. If the input is 2D, this might be the batch size
of the 3D version of the `from_tensor` and `to_tensor`.
from_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `from_tensor`.
to_seq_length: (Optional) If the input is 2D, this might be the seq length
of the 3D version of the `to_tensor`.
Returns:
float Tensor of shape [batch_size, from_seq_length,
num_attention_heads * size_per_head]. (If `do_return_2d_tensor` is
true, this will be of shape [batch_size * from_seq_length,
num_attention_heads * size_per_head]).
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
def transpose_for_scores(input_tensor, batch_size, num_attention_heads,
seq_length, width):
output_tensor = tf.reshape(
input_tensor, [batch_size, seq_length, num_attention_heads, width])
output_tensor = tf.transpose(a=output_tensor, perm=[0, 2, 1, 3])
return output_tensor
from_shape = get_shape_list(from_tensor, expected_rank=[2, 3])
to_shape = get_shape_list(to_tensor, expected_rank=[2, 3])
if len(from_shape) != len(to_shape):
raise ValueError(
"The rank of `from_tensor` must match the rank of `to_tensor`.")
if len(from_shape) == 3:
batch_size = from_shape[0]
from_seq_length = from_shape[1]
to_seq_length = to_shape[1]
elif len(from_shape) == 2:
if (batch_size is None or from_seq_length is None or to_seq_length is None):
raise ValueError(
"When passing in rank 2 tensors to attention_layer, the values "
"for `batch_size`, `from_seq_length`, and `to_seq_length` "
"must all be specified.")
# Scalar dimensions referenced here:
# B = batch size (number of sequences)
# F = `from_tensor` sequence length
# T = `to_tensor` sequence length
# N = `num_attention_heads`
# H = `size_per_head`
from_tensor_2d = reshape_to_matrix(from_tensor)
to_tensor_2d = reshape_to_matrix(to_tensor)
# `query_layer` = [B*F, N*H]
query_layer = tf.compat.v1.layers.dense(
from_tensor_2d,
num_attention_heads * size_per_head,
activation=query_act,
name="query",
kernel_initializer=create_initializer(initializer_range))
# `key_layer` = [B*T, N*H]
key_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=key_act,
name="key",
kernel_initializer=create_initializer(initializer_range))
# `value_layer` = [B*T, N*H]
value_layer = tf.compat.v1.layers.dense(
to_tensor_2d,
num_attention_heads * size_per_head,
activation=value_act,
name="value",
kernel_initializer=create_initializer(initializer_range))
# `query_layer` = [B, N, F, H]
query_layer = transpose_for_scores(query_layer, batch_size,
num_attention_heads, from_seq_length,
size_per_head)
# `key_layer` = [B, N, T, H]
key_layer = transpose_for_scores(key_layer, batch_size, num_attention_heads,
to_seq_length, size_per_head)
# Take the dot product between "query" and "key" to get the raw
# attention scores.
# `attention_scores` = [B, N, F, T]
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
attention_scores = tf.multiply(attention_scores,
1.0 / math.sqrt(float(size_per_head)))
if attention_mask is not None:
# `attention_mask` = [B, 1, F, T]
attention_mask = tf.expand_dims(attention_mask, axis=[1])
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
adder = (1.0 - tf.cast(attention_mask, tf.float32)) * -10000.0
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
attention_scores += adder
# Normalize the attention scores to probabilities.
# `attention_probs` = [B, N, F, T]
attention_probs = tf.nn.softmax(attention_scores)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = dropout(attention_probs, attention_probs_dropout_prob)
# `value_layer` = [B, T, N, H]
value_layer = tf.reshape(
value_layer,
[batch_size, to_seq_length, num_attention_heads, size_per_head])
# `value_layer` = [B, N, T, H]
value_layer = tf.transpose(a=value_layer, perm=[0, 2, 1, 3])
# `context_layer` = [B, N, F, H]
context_layer = tf.matmul(attention_probs, value_layer)
# `context_layer` = [B, F, N, H]
context_layer = tf.transpose(a=context_layer, perm=[0, 2, 1, 3])
if do_return_2d_tensor:
# `context_layer` = [B*F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size * from_seq_length, num_attention_heads * size_per_head])
else:
# `context_layer` = [B, F, N*H]
context_layer = tf.reshape(
context_layer,
[batch_size, from_seq_length, num_attention_heads * size_per_head])
return context_layer
def transformer_model(input_tensor,
attention_mask=None,
hidden_size=768,
num_hidden_layers=12,
num_attention_heads=12,
intermediate_size=3072,
intermediate_act_fn=gelu,
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
initializer_range=0.02,
do_return_all_layers=False):
"""Multi-headed, multi-layer Transformer from "Attention is All You Need".
This is almost an exact implementation of the original Transformer encoder.
See the original paper:
https://arxiv.org/abs/1706.03762
Also see:
https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/transformer.py
Args:
input_tensor: float Tensor of shape [batch_size, seq_length, hidden_size].
attention_mask: (optional) int32 Tensor of shape [batch_size, seq_length,
seq_length], with 1 for positions that can be attended to and 0 in
positions that should not be.
hidden_size: int. Hidden size of the Transformer.
num_hidden_layers: int. Number of layers (blocks) in the Transformer.
num_attention_heads: int. Number of attention heads in the Transformer.
intermediate_size: int. The size of the "intermediate" (a.k.a., feed
forward) layer.
intermediate_act_fn: function. The non-linear activation function to apply
to the output of the intermediate/feed-forward layer.
hidden_dropout_prob: float. Dropout probability for the hidden layers.
attention_probs_dropout_prob: float. Dropout probability of the attention
probabilities.
initializer_range: float. Range of the initializer (stddev of truncated
normal).
do_return_all_layers: Whether to also return all layers or just the final
layer.
Returns:
float Tensor of shape [batch_size, seq_length, hidden_size], the final
hidden layer of the Transformer.
Raises:
ValueError: A Tensor shape or parameter is invalid.
"""
if hidden_size % num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (hidden_size, num_attention_heads))
attention_head_size = int(hidden_size / num_attention_heads)
input_shape = get_shape_list(input_tensor, expected_rank=3)
batch_size = input_shape[0]
seq_length = input_shape[1]
input_width = input_shape[2]
# The Transformer performs sum residuals on all layers so the input needs
# to be the same as the hidden size.
if input_width != hidden_size:
raise ValueError("The width of the input tensor (%d) != hidden size (%d)" %
(input_width, hidden_size))
# We keep the representation as a 2D tensor to avoid re-shaping it back and
# forth from a 3D tensor to a 2D tensor. Re-shapes are normally free on
# the GPU/CPU but may not be free on the TPU, so we want to minimize them to
# help the optimizer.
prev_output = reshape_to_matrix(input_tensor)
all_layer_outputs = []
for layer_idx in range(num_hidden_layers):
with tf.compat.v1.variable_scope("layer_%d" % layer_idx):
layer_input = prev_output
with tf.compat.v1.variable_scope("attention"):
attention_heads = []
with tf.compat.v1.variable_scope("self"):
attention_head = attention_layer(
from_tensor=layer_input,
to_tensor=layer_input,
attention_mask=attention_mask,
num_attention_heads=num_attention_heads,
size_per_head=attention_head_size,
attention_probs_dropout_prob=attention_probs_dropout_prob,
initializer_range=initializer_range,
do_return_2d_tensor=True,
batch_size=batch_size,
from_seq_length=seq_length,
to_seq_length=seq_length)
attention_heads.append(attention_head)
attention_output = None
if len(attention_heads) == 1:
attention_output = attention_heads[0]
else:
# In the case where we have other sequences, we just concatenate
# them to the self-attention head before the projection.
attention_output = tf.concat(attention_heads, axis=-1)
# Run a linear projection of `hidden_size` then add a residual
# with `layer_input`.
with tf.compat.v1.variable_scope("output"):
attention_output = tf.compat.v1.layers.dense(
attention_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
attention_output = dropout(attention_output, hidden_dropout_prob)
attention_output = layer_norm(attention_output + layer_input)
# The activation is only applied to the "intermediate" hidden layer.
with tf.compat.v1.variable_scope("intermediate"):
intermediate_output = tf.compat.v1.layers.dense(
attention_output,
intermediate_size,
activation=intermediate_act_fn,
kernel_initializer=create_initializer(initializer_range))
# Down-project back to `hidden_size` then add the residual.
with tf.compat.v1.variable_scope("output"):
layer_output = tf.compat.v1.layers.dense(
intermediate_output,
hidden_size,
kernel_initializer=create_initializer(initializer_range))
layer_output = dropout(layer_output, hidden_dropout_prob)
layer_output = layer_norm(layer_output + attention_output)
prev_output = layer_output
all_layer_outputs.append(layer_output)
if do_return_all_layers:
final_outputs = []
for layer_output in all_layer_outputs:
final_output = reshape_from_matrix(layer_output, input_shape)
final_outputs.append(final_output)
return final_outputs
else:
final_output = reshape_from_matrix(prev_output, input_shape)
return final_output
def get_shape_list(tensor, expected_rank=None, name=None):
"""Returns a list of the shape of tensor, preferring static dimensions.
Args:
tensor: A tf.Tensor object to find the shape of.
expected_rank: (optional) int. The expected rank of `tensor`. If this is
specified and the `tensor` has a different rank, an exception will be
thrown.
name: Optional name of the tensor for the error message.
Returns:
A list of dimensions of the shape of tensor. All static dimensions will
be returned as python integers, and dynamic dimensions will be returned
as tf.Tensor scalars.
"""
if name is None:
name = tensor.name
if expected_rank is not None:
assert_rank(tensor, expected_rank, name)
shape = tensor.shape.as_list()
non_static_indexes = []
for (index, dim) in enumerate(shape):
if dim is None:
non_static_indexes.append(index)
if not non_static_indexes:
return shape
dyn_shape = tf.shape(input=tensor)
for index in non_static_indexes:
shape[index] = dyn_shape[index]
return shape
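# Added illustrative use of get_shape_list (a sketch; the shapes below are
# assumptions, not taken from this file). With a fully static shape, plain
# Python ints come back; a None dimension would come back as a scalar tf.Tensor.
def _example_get_shape_list():
  x = tf.zeros([2, 128, 768])
  return get_shape_list(x, expected_rank=3, name="x")  # -> [2, 128, 768]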
def reshape_to_matrix(input_tensor):
"""Reshapes a >= rank 2 tensor to a rank 2 tensor (i.e., a matrix)."""
ndims = input_tensor.shape.ndims
if ndims < 2:
raise ValueError("Input tensor must have at least rank 2. Shape = %s" %
(input_tensor.shape))
if ndims == 2:
return input_tensor
width = input_tensor.shape[-1]
output_tensor = tf.reshape(input_tensor, [-1, width])
return output_tensor
def reshape_from_matrix(output_tensor, orig_shape_list):
"""Reshapes a rank 2 tensor back to its original rank >= 2 tensor."""
if len(orig_shape_list) == 2:
return output_tensor
output_shape = get_shape_list(output_tensor)
orig_dims = orig_shape_list[0:-1]
width = output_shape[-1]
return tf.reshape(output_tensor, orig_dims + [width])
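# Added sketch of the two reshape helpers round-tripping a 3D activation
# (shapes are assumptions; assumes graph / tf.compat.v1 usage like the rest of
# this file): flatten to a matrix, then restore [batch, seq_length, width].
def _example_reshape_round_trip():
  x = tf.zeros([2, 128, 768])
  orig_shape = get_shape_list(x, expected_rank=3, name="x")
  flat = reshape_to_matrix(x)                    # [2 * 128, 768]
  return reshape_from_matrix(flat, orig_shape)   # back to [2, 128, 768]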
def assert_rank(tensor, expected_rank, name=None):
"""Raises an exception if the tensor rank is not of the expected rank.
Args:
tensor: A tf.Tensor to check the rank of.
expected_rank: Python integer or list of integers, expected rank.
name: Optional name of the tensor for the error message.
Raises:
ValueError: If the expected shape doesn't match the actual shape.
"""
if name is None:
name = tensor.name
expected_rank_dict = {}
if isinstance(expected_rank, six.integer_types):
expected_rank_dict[expected_rank] = True
else:
for x in expected_rank:
expected_rank_dict[x] = True
actual_rank = tensor.shape.ndims
if actual_rank not in expected_rank_dict:
scope_name = tf.compat.v1.get_variable_scope().name
raise ValueError(
"For the tensor `%s` in scope `%s`, the actual rank "
"`%d` (shape = %s) is not equal to the expected rank `%s`" %
(name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
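# Added illustration of assert_rank (values are assumptions): either a single
# int or a list of acceptable ranks may be passed; a mismatch raises ValueError.
def _example_assert_rank():
  x = tf.zeros([2, 128])
  assert_rank(x, 2, name="x")         # passes
  assert_rank(x, [2, 3], name="x")    # also passes: rank may be any listed value
  # assert_rank(x, 3, name="x")       # would raise ValueError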
| [
"[email protected]"
] | |
e8001a656cae6b21c00f398deca4b950fda490ed | ab98c033b4c2e80b304e9f77b740b6d545870b66 | /data_aggregation/CreationBDD2_3_aliases+GoTerms/researchDG.py | 86a42e0a2c3605a4904beaa95bb1e64303e6338a | [] | no_license | yannistannier/textmining-light | 503384c28f5fb4763293ced15337295685d84ba3 | 864210d127684d5af55336ceb8c0718d0f2c3e3c | refs/heads/master | 2020-04-14T23:37:38.751779 | 2019-01-07T09:10:50 | 2019-01-07T09:10:50 | 164,209,545 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,808 | py | from Bio import Entrez, SeqIO, Medline
import scipy.sparse as sp
import numpy as np
import pandas as pd
import sklearn
import sys
Entrez.email = "[email protected]"
def recupDictionnaires():
ens = []
file = open("dict.txt", "r")
doc = file.read()
dim = len(doc.split("##"))
i = 0
print ("STARTING RECUP")
for line in doc.split("##"):
if (i < dim-1):
symbol, aliases, name, diseases, goTerms, pubmedIds = line.split("|")
dico = {}
dico['symbol'] = symbol
dico['aliases'] = []
for alias in aliases.split(","):
if (not alias == ""):
dico['aliases'].append(alias)
dico['name'] = name
dico['diseases'] = []
for disease in diseases.split(","):
if (not disease == ""):
dico['diseases'].append(disease)
dico['goTerms'] = []
for goTerm in goTerms.split(","):
if (not goTerm == ""):
dico['goTerms'].append(goTerm)
dico['pubmedIds'] = []
for pubmedId in pubmedIds.split(","):
if (not pubmedId == ""):
dico['pubmedIds'].append(pubmedId)
ens.append(dico)
#print (dico)
i += 1
print ("END RECUP")
return ens
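# Added, assumed example of one dict.txt record (inferred from the parsing in
# recupDictionnaires above; records are "##"-separated, fields pipe-delimited,
# and the values here are placeholders, not real data):
EXAMPLE_DICT_RECORD = "SYMBOL1|alias1,alias2|example gene name|disease A,disease B|GO:0000001,GO:0000002|12345678,23456789##"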
def pbmd_search(maladie,gene):
handle = Entrez.esearch(db = 'pubmed', term = maladie + " AND " + gene, retmax = '1000000000')
print (maladie + " AND " + gene)
result = Entrez.read(handle)
handle.close()
return(result)
def ecriture_file(maladie, gene, value):
output = open('output.txt', 'a')
output.write(maladie+"|" +gene+ "|" + str(value) + "##")
output.close()
def ecriture_end_dg():
output = open('output.txt', 'a')
output.write("@@")
output.close()
if __name__ == '__main__':
print("################### START SEARCHING ###################")
    ###### RETRIEVE THE LIST OF GENE DICTIONARIES
ens = recupDictionnaires()
dim = len(ens)
print (dim)
    ###### OPEN THE OUTPUT FILE AND CHECK ITS CONTENT TO RESUME THE SEARCH
try :
file = open("output.txt", "r")
except IOError:
file = open("output.txt", "x")
file = open("output.txt", "r")
doc = file.read()
file.close()
lim = 0
already = doc.split("@@")
for line in already:
lim += 1
finalPart = already[-1]
print (finalPart)
print (lim)
delim = 0
for part in finalPart.split("##"):
delim += 1
i = 0
#while (i < dim):
    ###### FOR EACH DICTIONARY:
for dico in ens:
print ("Loading...")
print(str(i+1) + "/" + str(dim))
        # IF IT IS ALREADY WRITTEN IN THE FILE, SKIP TO THE NEXT ONE
        if (i < lim -1):
            print ("... GENE : Already done")
        # OTHERWISE:
        # CHECK WHICH DISEASES HAVE ALREADY BEEN WRITTEN
else :
print ("Preparing GENE...")
            # BUILD THE PUBMED QUERY
genes = set([])
if (not len(dico['aliases']) == 0):
genes.update(dico['aliases'])
if (not len(dico['symbol']) == 0):
genes.add(dico['symbol'])
if (not len(dico['name']) == 0):
genes.add(dico['name'])
genes_string = " OR ".join(genes)
genes_string = "(" + genes_string + ")"
print ("GENE : ", genes_string)
nbD = len(dico['diseases'])
cptD = 0
#while (cptD < nbD):
            ##### FOR EACH DISEASE
for disease in dico['diseases']:
                # IF ALREADY DONE
if (cptD < delim -1):
print(str(cptD+1) + "/" + str(nbD))
print ("... DISEASE Already done")
                # IF NOT DONE YET
else:
                    # PUBMED QUERY
print ("SEARCHING DISEASE (" + str(cptD+1) + "/" + str(nbD) + ")" )
result = []
idList = set([])
result = pbmd_search(disease,genes_string)
idList.update(result['IdList'])
key = len(idList)
key = key + 1
print("PRINTING IN FILE ...")
ecriture_file(disease, dico['symbol'], key)
print ("***** OK !!")
cptD += 1
ecriture_end_dg()
delim = 0
i+=1
print("################### END SEARCHING ###################")
| [
"[email protected]"
] | |
049c12ca9c2ec403bf4f152a25d45aee9f1d0c8c | 6cb1bd6816af5964c82e127e9e28cd6d0fd5fd7d | /05-05finally.py | e54a7e88b941cc9bae40f8afbc18c38fba553199 | [] | no_license | jinju-lee/Python-study | 660f952b3c16d675147f870e1cab473177106636 | c226bcb2c501c49ac157b6d3a3d18e515f3011f8 | refs/heads/master | 2021-05-08T21:15:32.782635 | 2018-02-11T14:32:26 | 2018-02-11T14:32:26 | 119,610,114 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | try:
    num = int(input('숫자를 입력하세요:'))  # prompt: "Please enter a number:"
except ValueError:
    print('숫자가 아닙니다.')  # "That is not a number."
else:
    print(num)
finally:
    print('finally는 무조건 실행됩니다.')  # "The finally block always runs."
| [
"[email protected]"
] | |
158affbd0c69bdb708234a227040c705d8a0b2f4 | 88853b9b6c3ae0c12cbd5bf83be3d48f2fe0159e | /document/eggs_package/gflux_egg/gflux/gflux/apps/station/management/commands/obselete/deal_with_shihua_none_fuel_data.py | e12cd0c2aed3e9ab1d6084886feb5e89e19b5a76 | [] | no_license | laoyin/nyf | 9151da3368c2e636501bcf1ad52f895fe446c04b | a2b12f54827b6088548f060881b6dafa2d791a3a | refs/heads/master | 2021-01-17T17:11:14.174968 | 2015-09-28T01:12:28 | 2015-09-28T01:12:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,588 | py | # coding=utf-8
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from django.core.management.base import BaseCommand
from dash.core.backends.sql.models import get_dash_session_maker
from gflux.apps.common.models import SiteDayBatch
from datetime import datetime
from optparse import make_option
import sys,pdb,re
from dash.core.utils import getPaymentTypeByCard
import hashlib
import xlrd
import random
from xlrd import xldate_as_tuple
class Command(BaseCommand):
    help = 'Deal with non-fuel data'
option_list = BaseCommand.option_list + (
make_option('--file',help="set file path",type="string"),
make_option('--save_path',help="save file path",type="string"),
)
def handle(self, *args, **options):
print 'start...'
save_path=options['save_path']
try:
trans_count=30150001
book=xlrd.open_workbook(options['file'],encoding_override='gb2312')
sheets=book.sheets()
for sheet in sheets:
nrows=sheet.nrows
for row_idx in xrange(nrows):
                    # skip the header row
if row_idx == 0:
continue
                    # skip the grand-total row at the end
if row_idx==nrows-1:
return
row=sheet.row_values(row_idx)
with open(save_path+'/changjiangdao.txt','a') as lf:
site=eval(repr(row[1])[1:]).decode('gbk','ignore')
                        # handle the unicode encoding and strip the trailing whitespace after the text
site = site.decode('unicode-escape').rstrip()+','
trans_type='1,'
cardnum='0,'
payment_type='1000,'
timestamp = row[3]+','
barcode=str(row[11][0:11])+','
pay=str(row[38])+','
quantity=str(row[31])+','
desc=eval(repr(row[10])[1:]).decode('gbk','ignore')
desc=desc.decode('unicode-escape')+','
price=str(row[35])+','
unitname=row[12]+','
pump_id='0,'
trans_id=str(trans_count)+'\n'
trans_count+=1
lf.write(site+trans_type+cardnum+payment_type+timestamp+barcode+pay+quantity+desc+price+unitname+pump_id+trans_id)
print 'ok'
except Exception,e:
print e
print 'end...'
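# Added usage note (assumed invocation; the option names come from option_list above):
#   python manage.py deal_with_shihua_none_fuel_data --file=/path/to/input.xls --save_path=/path/to/output_dir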
| [
"[email protected]"
] | |
38769bed99e3caf79b45b1c948a5142c38462485 | dee1aa5ce988f59165a8a651b28f471c468fff99 | /tributary/lazy/output/__init__.py | 3d73f9fd9636c043435f23c15dff8fc754280c14 | [
"Apache-2.0"
] | permissive | ceball/tributary | 04f22e57048a3cb0375b57cdb30e62f69cf4a380 | 5e30f90d1a5cc176c0f231f525d9dc5a81353925 | refs/heads/master | 2022-12-05T20:35:33.631468 | 2020-08-28T13:14:24 | 2020-08-28T13:14:24 | 291,319,040 | 0 | 0 | Apache-2.0 | 2020-08-29T17:39:39 | 2020-08-29T17:39:38 | null | UTF-8 | Python | false | false | 3,819 | py | from ..node import Node
def _print(node, cache=None):
if cache is None:
cache = {}
cache[id(node)] = node
ret = {node: []}
if node._dependencies:
for call, deps in node._dependencies.items():
# callable node
if hasattr(call, '_node_wrapper') and \
call._node_wrapper is not None:
val = call._node_wrapper._print(cache)
ret[node].append(val)
# args
for arg in deps[0]:
val = arg._print(cache)
ret[node].append(val)
# kwargs
for kwarg in deps[1].values():
val = kwarg._print(cache)
ret[node].append(val)
return ret
def Print(node):
return node._print({})
def Graph(node):
return node.print()
def GraphViz(node):
d = node.graph()
from graphviz import Digraph
dot = Digraph(node._name, strict=True)
dot.format = 'png'
def rec(nodes, parent):
for d in nodes:
if not isinstance(d, dict):
if d.isDirty():
dot.node(d._name, color='red', shape=d._graphvizshape)
dot.edge(d._name, parent._name, color='red')
else:
dot.node(d._name, shape=d._graphvizshape)
dot.edge(d._name, parent._name)
else:
for k in d:
if k.isDirty():
dot.node(k._name, color='red', shape=k._graphvizshape)
rec(d[k], k)
dot.edge(k._name, parent._name, color='red')
else:
dot.node(k._name, shape=k._graphvizshape)
rec(d[k], k)
dot.edge(k._name, parent._name)
for k in d:
if k.isDirty():
dot.node(k._name, color='red', shape=k._graphvizshape)
else:
dot.node(k._name, shape=k._graphvizshape)
rec(d[k], k)
return dot
def Dagre(node):
import ipydagred3 as dd3
G = dd3.Graph()
d = Graph(node)
def rec(nodes, parent):
for d in nodes:
if not isinstance(d, dict):
d._dd3g = G
if d.isDirty():
G.setNode(d._name, style='fill: #f00', shape="rect" if d._graphvizshape == "box" else d._graphvizshape)
# G.setEdge(d._name, parent._name, style='stroke: #f00')
else:
G.setNode(d._name, style='fill: #fff', shape="rect" if d._graphvizshape == "box" else d._graphvizshape)
G.setEdge(d._name, parent._name, style='stroke: #000')
else:
for k in d:
k._dd3g = G
if k.isDirty():
G.setNode(k._name, style='fill: #f00', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
# G.setEdge(k._name, parent._name, style='stroke: #f00')
else:
G.setNode(k._name, style='fill: #fff', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
G.setEdge(k._name, parent._name, style='stroke: #000')
for k in d:
k._dd3g = G
if k.isDirty():
G.setNode(k._name, style='fill: #f00', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
else:
G.setNode(k._name, style='fill: #fff', shape="rect" if k._graphvizshape == "box" else k._graphvizshape)
rec(d[k], k)
graph = dd3.DagreD3Widget(graph=G)
return graph
Node._print = _print
Node.print = Print
Node.graph = Graph
Node.graphviz = GraphViz
Node.dagre = Dagre
| [
"[email protected]"
] | |
7e5da0fbf908161bc4084fef3c8bf28c92b54ad9 | c868d681415d152ba331bd80e0ed542832f20f0e | /week 3/todo_project/todo_project/main/migrations/0005_auto_20200205_2301.py | 819409651f247514434443d5e5acf7e6a3948904 | [] | no_license | Yeldarmt/BFDjango | a297a6b0c00ffb1a269f05c7e6665c5d34a51097 | b8256ff1d5f2125495df66eabf267fc17e667aeb | refs/heads/master | 2022-11-30T12:45:17.356453 | 2020-04-19T16:50:26 | 2020-04-19T16:50:26 | 233,515,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 906 | py | # Generated by Django 2.0 on 2020-02-05 17:01
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('main', '0004_auto_20200203_1126'),
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('description', models.TextField()),
('completed', models.BooleanField(default=False)),
],
),
migrations.DeleteModel(
name='Employee',
),
migrations.RemoveField(
model_name='myuser',
name='name',
),
migrations.RemoveField(
model_name='myuser',
name='sur_name',
),
]
| [
"[email protected]"
] | |
3467e9fbec6ceb28a2b2a98d25b2a0dbb03e4122 | 78e60a7d8a67ed76244004e8a3ed573fbf396e41 | /samples/sq__unbind_skill.py | 5e488f7e1a0d6a6147070822fc075df323fa31d5 | [
"MIT"
] | permissive | Crivez/apiclient-python | 837a9f7cc0453ccd3121311adc7920b5fe6b3e33 | 860fc054f546152a101e29b1af388c381075ac47 | refs/heads/master | 2023-06-08T13:24:09.249704 | 2021-06-17T12:16:35 | 2021-06-17T12:16:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 490 | py | from voximplant.apiclient import VoximplantAPI, VoximplantException
if __name__ == "__main__":
voxapi = VoximplantAPI("credentials.json")
# Unbind the skill with id = 1 from the user with id = 1.
APPLICATION_ID = 1
USER_ID = 1
SQ_SKILL_ID = 1
try:
res = voxapi.sq__unbind_skill(APPLICATION_ID,
USER_ID,
SQ_SKILL_ID)
print(res)
except VoximplantException as e:
print("Error: {}".format(e.message))
| [
"[email protected]"
] | |
21282b4075722ef249ada742b5404b049ef993c0 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/diagnostic/migrations/0014_auto_20180427_1159.py | 32ba48033c717f5b1bc268bb1082d320a0021623 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 993 | py | # Generated by Django 2.0.2 on 2018-04-27 06:29
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('diagnostic', '0013_auto_20180426_1939'),
]
operations = [
migrations.AddField(
model_name='lab',
name='onboarding_status',
field=models.PositiveSmallIntegerField(choices=[(1, 'Not Onboarded'), (2, 'Onboarding Request Sent'), (3, 'Onboarded')], default=1),
),
migrations.AddField(
model_name='labonboardingtoken',
name='email',
field=models.EmailField(blank=True, max_length=100),
),
migrations.AddField(
model_name='labonboardingtoken',
name='mobile',
field=models.BigIntegerField(blank=True, null=True, validators=[django.core.validators.MaxValueValidator(9999999999), django.core.validators.MinValueValidator(1000000000)]),
),
]
| [
"[email protected]"
] | |
91a320683b315b2ec3f1fff36159fbc626d0fef6 | 2ccb6448e4783275350a388f2b71ace5c68a8024 | /mars/dataframe/reduction/custom_reduction.py | c08a1e6da4ecdc13fc077ff6f87e6c0a2c1dcdf8 | [
"BSD-3-Clause",
"OFL-1.1",
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | songroger/mars | 3fb286513d039944134c84c3e56f5d23cbe4562a | ae79f70599b5253418a05daed88ae835b8303649 | refs/heads/master | 2021-06-20T17:13:58.186858 | 2021-01-25T02:53:44 | 2021-01-25T02:53:44 | 166,158,684 | 2 | 0 | Apache-2.0 | 2019-01-17T04:12:10 | 2019-01-17T04:12:09 | null | UTF-8 | Python | false | false | 1,730 | py | # Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ... import opcodes as OperandDef
from ...config import options
from ...core import OutputType
from ...serialize import AnyField
from .core import DataFrameReductionOperand, DataFrameReductionMixin
class DataFrameCustomReduction(DataFrameReductionOperand, DataFrameReductionMixin):
_op_type_ = OperandDef.CUSTOM_REDUCTION
_func_name = 'custom_reduction'
_custom_reduction = AnyField('custom_reduction')
def __init__(self, custom_reduction=None, **kw):
super().__init__(_custom_reduction=custom_reduction, **kw)
@property
def custom_reduction(self):
return self._custom_reduction
@property
def is_atomic(self):
return True
def get_reduction_args(self, axis=None):
return dict()
def build_custom_reduction_result(df, custom_reduction_obj):
use_inf_as_na = options.dataframe.mode.use_inf_as_na
output_type = OutputType.series if df.ndim == 2 else OutputType.scalar
op = DataFrameCustomReduction(custom_reduction=custom_reduction_obj, output_types=[output_type],
use_inf_as_na=use_inf_as_na)
return op(df)
| [
"[email protected]"
] | |
455de03e8f49274c758ae023bb63a631d4a1a7be | 0b1e404a165c960677d07015bc26aac0569cf84a | /src/combustion/models/efficientdet.py | 9ad75fdd19fa31300cf789c8bae8a09322c9b3c9 | [
"Apache-2.0"
] | permissive | johndpope/combustion | d3ec349cd7be086f55b4e3deebd571c97842e1ed | c3f91e62a10a873cfeeae8c675b0683bc5158818 | refs/heads/master | 2023-03-01T14:34:42.149415 | 2021-02-07T17:55:58 | 2021-02-13T17:17:23 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,419 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from copy import deepcopy
from typing import Any, Dict, List, Optional
import torch
import torch.nn as nn
from torch import Tensor
from combustion.nn import BiFPN1d, BiFPN2d, BiFPN3d, MatchShapes, MobileNetBlockConfig
from .efficientnet import _EfficientNet
class _EfficientDetMeta(type):
def __new__(cls, name, bases, dct):
x = super().__new__(cls, name, bases, dct)
if "3d" in name:
x.Conv = nn.Conv3d
x.BatchNorm = nn.BatchNorm3d
x.BiFPN = BiFPN3d
x._get_blocks = MobileNetBlockConfig.get_3d_blocks
elif "2d" in name:
x.Conv = nn.Conv2d
x.BatchNorm = nn.BatchNorm2d
x.BiFPN = BiFPN2d
x._get_blocks = MobileNetBlockConfig.get_2d_blocks
elif "1d" in name:
x.Conv = nn.Conv1d
x.BatchNorm = nn.BatchNorm1d
x.BiFPN = BiFPN1d
x._get_blocks = MobileNetBlockConfig.get_1d_blocks
else:
raise RuntimeError(f"Metaclass: error processing name {cls.__name__}")
return x
class _EfficientDet(_EfficientNet):
__constants__ = ["fpn_levels"]
def __init__(
self,
block_configs: List[MobileNetBlockConfig],
fpn_levels: List[int] = [3, 5, 7, 8, 9],
fpn_filters: int = 64,
fpn_repeats: int = 3,
width_coeff: float = 1.0,
depth_coeff: float = 1.0,
width_divisor: float = 8.0,
min_width: Optional[int] = None,
stem: Optional[nn.Module] = None,
head: Optional[nn.Module] = None,
fpn_kwargs: dict = {},
):
super(_EfficientDet, self).__init__(
block_configs, width_coeff, depth_coeff, width_divisor, min_width, stem, head
)
self.fpn_levels = fpn_levels
# convolutions mapping backbone feature maps to constant number of channels
fpn_convs = []
output_filters = self.round_filters(fpn_filters, 1.0, width_divisor, min_width)
self.__fpn_filters = output_filters
for i, config in enumerate(self.block_configs):
if i + 1 in fpn_levels:
input_filters = config.output_filters
conv = self.Conv(input_filters, output_filters, kernel_size=1)
fpn_convs.append(conv)
for i in fpn_levels:
if i == len(self.block_configs) + 1:
input_filters = self.block_configs[-1].output_filters
conv = self.Conv(input_filters, output_filters, kernel_size=3, stride=2, padding=1)
fpn_convs.append(conv)
elif i > len(self.block_configs) + 1:
input_filters = output_filters
conv = self.Conv(input_filters, output_filters, kernel_size=3, stride=2, padding=1)
fpn_convs.append(nn.Sequential(nn.ReLU(), conv))
self.fpn_convs = nn.ModuleList(fpn_convs)
self.match = MatchShapes()
# defaults for batch norm params
_ = {"bn_momentum": 0.01, "bn_epsilon": 1e-3}
_.update(fpn_kwargs)
fpn_kwargs = _
# build bifpn
bifpn_layers = []
for i in range(fpn_repeats):
bifpn = self.BiFPN(output_filters, levels=len(fpn_levels), **fpn_kwargs)
bifpn_layers.append(bifpn)
self.bifpn_layers = nn.ModuleList(bifpn_layers)
@torch.jit.unused
@property
def fpn_filters(self) -> int:
r"""Number of filters in each level of the BiFPN. When using a custom head, use this
property to determine the number of filters in the head's input.
"""
return self.__fpn_filters
def extract_features(self, inputs: Tensor) -> List[Tensor]:
r"""Runs the EfficientDet stem and body to extract features, returning a list of
tensors representing features extracted from each block.
Args:
inputs (:class:`torch.Tensor`):
Model inputs
"""
# efficientnet feature extractor
backbone_features: List[Tensor] = []
x = self.stem(inputs)
prev_x = x
for block in self.blocks:
x = block(prev_x)
backbone_features.append(x)
prev_x = x
# pull out feature maps to be used in BiFPN
captured_features: List[Tensor] = []
for i in self.fpn_levels:
if i - 1 < len(backbone_features):
captured_features.append(backbone_features[i - 1])
# map to constant channel number using trivial convs
for i, conv in enumerate(self.fpn_convs):
if i < len(captured_features):
captured_features[i] = conv(captured_features[i])
else:
prev_x = conv(prev_x)
captured_features.append(prev_x)
for bifpn in self.bifpn_layers:
captured_features = bifpn(captured_features)
return captured_features
def forward(self, inputs: Tensor) -> List[Tensor]:
r"""Runs the entire EfficientDet model, including stem, body, and head.
If no head was supplied, the output of :func:`extract_features` will be returned.
Otherwise, the output of the given head will be returned.
.. note::
The returned output will always be a list of tensors. If a custom head is given
and it returns a single tensor, that tensor will be wrapped in a list before
being returned.
Args:
inputs (:class:`torch.Tensor`):
Model inputs
"""
output = self.extract_features(inputs)
if self.head is not None:
output = self.head(output)
if not isinstance(output, list):
output = [
output,
]
return output
@classmethod
def from_predefined(cls, compound_coeff: int, block_overrides: Dict[str, Any] = {}, **kwargs) -> "_EfficientDet":
r"""Creates an EfficientDet model using one of the parameterizations defined in the
`EfficientDet paper`_.
Args:
compound_coeff (int):
Compound scaling parameter :math:`\phi`. For example, to construct EfficientDet-D0, set
``compound_coeff=0``.
block_overrides (dict):
Overrides to be applied to each :class:`combustion.nn.MobileNetBlockConfig`.
**kwargs:
Additional parameters/overrides for model constructor.
        .. _EfficientDet paper:
            https://arxiv.org/abs/1911.09070
"""
# from paper
alpha = 1.2
beta = 1.1
width_divisor = 8.0
depth_coeff = alpha ** compound_coeff
width_coeff = beta ** compound_coeff
fpn_filters = int(64 * 1.35 ** compound_coeff)
fpn_repeats = 3 + compound_coeff
fpn_levels = [3, 5, 7, 8, 9]
# apply config overrides at each block
block_configs = deepcopy(cls.DEFAULT_BLOCKS)
for k, v in block_overrides.items():
for config in block_configs:
setattr(config, str(k), v)
final_kwargs = {
"block_configs": block_configs,
"width_coeff": width_coeff,
"depth_coeff": depth_coeff,
"width_divisor": width_divisor,
"fpn_filters": fpn_filters,
"fpn_repeats": fpn_repeats,
"fpn_levels": fpn_levels,
}
final_kwargs.update(kwargs)
result = cls(**final_kwargs)
result.compound_coeff = compound_coeff
return result
class EfficientDet1d(_EfficientDet, metaclass=_EfficientDetMeta):
pass
class EfficientDet2d(_EfficientDet, metaclass=_EfficientDetMeta):
r"""Implementation of EfficientDet as described in the `EfficientDet paper`_.
EfficientDet is built on an EfficientNet backbone
(see :class:`combustion.models.EfficientNet2d` for details). EfficientDet adds a
bidirectional feature pyramid network (see :class:`combustion.nn.BiFPN2d`), which
mixes information across the various feature maps produced by the EfficientNet backbone.
.. image:: ./efficientdet.png
:width: 800px
:align: center
:height: 300px
:alt: Diagram of EfficientDet
The authors of EfficientDet used the default EfficientNet scaling parameters for the backbone:
.. math::
\alpha = 1.2 \\
\beta = 1.1 \\
\gamma = 1.15
The BiFPN was scaled as follows:
.. math::
W_\text{bifpn} = 64 \cdot \big(1.35^\phi\big) \\
D_\text{bifpn} = 3 + \phi
In the original EfficientDet implementation, the authors extract feature maps from levels
3, 5, and 7 of the backbone. Two additional coarse levels are created by performing additional
strided convolutions to the final level in the backbone, for a total of 5 levels in the BiFPN.
.. note::
Currently, DropConnect ratios are not scaled based on depth of the given block.
This is a deviation from the true EfficientNet implementation.
Args:
block_configs (list of :class:`combustion.nn.MobileNetBlockConfig`)
Configs for each of the :class:`combustion.nn.MobileNetConvBlock2d` blocks
used in the model.
fpn_levels (list of ints):
Indicies of EfficientNet feature levels to include in the BiFPN, starting at index 1.
Values in ``fpn_levels`` greater than the total number of blocks in the backbone denote
levels that should be created by applying additional strided convolutions to the final
level in the backbone.
fpn_filters (int):
Number of filters to use for the BiFPN. The filter count given here should be the desired
number of filters after width scaling.
fpn_repeats (int):
Number of repeats to use for the BiFPN. The repeat count given here should be the desired
number of repeats after depth scaling.
width_coeff (float):
The width scaling coefficient. Increasing this increases the width of the model.
depth_coeff (float):
The depth scaling coefficient. Increasing this increases the depth of the model.
width_divisor (float):
Used in calculating number of filters under width scaling. Filters at each block
will be a multiple of ``width_divisor``.
min_width (int):
The minimum width of the model at any block
stem (:class:`torch.nn.Module`):
An optional stem to use for the model. The default stem is a single
3x3/2 conolution that expects 3 input channels.
head (:class:`torch.nn.Module`):
An optional head to use for the model. By default, no head will be used
and ``forward`` will return a list of tensors containing extracted features.
fpn_kwargs (dict):
Keyword args to be passed to all :class:`combustion.nn.BiFPN2d` layers.
Shapes
* Input: :math:`(N, C, H, W)`
* Output: List of tensors of shape :math:`(N, C, H', W')`, where height and width vary
depending on the amount of downsampling for that feature map.
.. _EfficientDet paper:
https://arxiv.org/abs/1911.09070
"""
class EfficientDet3d(_EfficientDet, metaclass=_EfficientDetMeta):
pass
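# Added usage sketch (assumptions: the EfficientNet DEFAULT_BLOCKS configs are
# available on the class, and the default stem expects 3 input channels):
def _example_efficientdet2d():
    model = EfficientDet2d.from_predefined(compound_coeff=0)
    # phi = 0 -> fpn_filters = 64 * 1.35**0 = 64 and fpn_repeats = 3 + 0 = 3
    features = model(torch.rand(1, 3, 512, 512))
    return [f.shape for f in features]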
| [
"[email protected]"
] | |
04bd04acd5e7aa633add08bc3d16d2ce6aaab1c1 | b94d30af18ef5cb1b13ce023b0e6be5aac8f454f | /venv/lib/python3.6/encodings/cp1255.py | f029cb39129fde78e455f1fcf51b313418fccd11 | [] | no_license | Gitlittlerubbish/SNS | 18be94122f15875a55b39d6e55fee821a9b89e7e | 84355e38e1f1b072f04b11f55a6cac958c63638d | refs/heads/master | 2020-12-14T14:49:58.041805 | 2020-03-12T15:00:25 | 2020-03-12T15:00:25 | 234,754,962 | 0 | 3 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | /Users/chenxiao/.pyenv/versions/3.6.6/lib/python3.6/encodings/cp1255.py | [
"[email protected]"
] | |
b255bd1e4fb2df23e823fb53929c10a3c852f996 | 98801e91bf1a78c5903449082113ecc154cd020e | /src/dron/notify/ntfy_desktop.py | 369cf3cf6b8d8ba4eea5002b5597032c988ce1d5 | [] | no_license | karlicoss/dron | bcec62e3602fa12134fdb6b86cc54f839086eba5 | 395d8a259b083b86f3128240bfa8f905fa255921 | refs/heads/master | 2023-06-10T07:12:20.799184 | 2023-06-04T23:41:33 | 2023-06-04T23:48:07 | 236,066,875 | 39 | 2 | null | 2023-06-04T23:48:08 | 2020-01-24T19:14:14 | Python | UTF-8 | Python | false | false | 302 | py | #!/usr/bin/env python3
from .common import get_parser, IS_SYSTEMD
from .ntfy_common import run_ntfy
BACKEND = 'linux' if IS_SYSTEMD else 'darwin'
def main() -> None:
p = get_parser()
args = p.parse_args()
run_ntfy(job=args.job, backend=BACKEND)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a3afa5a2a3d5d7657e6d618c5fd7fedd42af7d4e | a550aece79bda789826b463280b91abffbf2d372 | /django_projects/chat_app_channels/chatapp/chat/consumers.py | f0aa98c0798399935500a605d8bf556123c8a97f | [
"MIT"
] | permissive | phiratio/learn_python | 20376470eaa292c157fd01f52b3077e3a983cd5a | a32240d4355fb331805d515f96e1d009914e5c47 | refs/heads/master | 2022-11-27T07:07:45.712373 | 2020-12-03T22:04:31 | 2020-12-03T22:04:31 | 189,397,679 | 1 | 0 | MIT | 2022-11-22T04:40:27 | 2019-05-30T10:56:10 | Python | UTF-8 | Python | false | false | 1,288 | py | import json
from channels.generic.websocket import AsyncWebsocketConsumer
class ChatConsumer(AsyncWebsocketConsumer):
async def connect(self):
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = 'chat_%s' % self.room_name
# Join room group
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
# Leave room group
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
# Receive message from WebSocket
async def receive(self, text_data):
text_data_json = json.loads(text_data)
message = text_data_json['message']
# Send message to room group
await self.channel_layer.group_send(
self.room_group_name,
{
'type': 'chat_message',
'message': message
}
)
# Receive message from room group
async def chat_message(self, event):
message = event['message']
# Send message to WebSocket
await self.send(text_data=json.dumps({
'message': message
}))
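# Added sketch of the websocket routing this consumer assumes (file name and URL
# pattern are assumptions based on the `room_name` kwarg read in connect();
# newer Channels versions would use consumers.ChatConsumer.as_asgi()):
#
#   # chat/routing.py
#   from django.urls import re_path
#   from . import consumers
#
#   websocket_urlpatterns = [
#       re_path(r'ws/chat/(?P<room_name>[^/]+)/$', consumers.ChatConsumer),
#   ]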
| [
"[email protected]"
] | |
30df387384a195945b78fe44a457618949568134 | 98e761a1702351df3b3db91e4ee832ae25d213d1 | /test/db_predict2.py | 0c33754dac31c6939eb33c91f9c38e337e28bad6 | [] | no_license | jack139/face-test | ed637fdabace49c969dac8abbd12d2e80c589fec | 3907bf1e84c1e346b4429da0e8ca919ca6404098 | refs/heads/master | 2023-01-18T18:33:24.812823 | 2020-11-23T13:32:22 | 2020-11-23T13:32:22 | 315,326,106 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,085 | py | # -*- coding: utf-8 -*-
# Run two recognition models in parallel
import os, sys
import base64
#from datetime import datetime
from models.predict_plus import predict_parallel, predict_thread_db
if __name__ == "__main__":
if len(sys.argv)<4:
print("usage: python3 %s <knn|keras> <group_id> <test dir or file>" % sys.argv[0])
sys.exit(2)
#from facelib import api_func
classifier = sys.argv[1]
group_id = sys.argv[2]
test_thing = sys.argv[3]
if classifier not in ['knn', 'keras']:
print('invalid classifier!')
sys.exit(3)
if os.path.isdir(test_thing):
images = os.listdir(test_thing)
images = [os.path.join(test_thing, i) for i in images]
else:
images = [ test_thing ]
# Using the trained classifier, make predictions for unknown images
for image_file in images:
print("Looking for faces in {}".format(image_file))
with open(image_file, 'rb') as f:
image_data = f.read()
image_b64 = base64.b64encode(image_data)
# Find all people in the image using a trained classifier model
# Note: You can pass in either a classifier file name or a classifier model instance
#predictions = api_func.face_search('', image_b64, group_id)
#start_time = datetime.now()
predictions = predict_parallel(predict_thread_db, image_b64, group_id, classifier=classifier)
#print('[Time taken: {!s}]'.format(datetime.now() - start_time))
# Print results on the console
for name, (top, right, bottom, left), distance, count in predictions:
print("- Found {} at ({}, {}), distance={}, count={}".format(name, left, top, distance, count))
#for i in predictions:
# print("- Found {} at {}, distance={}".format(i['user_id'], i['location'], i['score']))
if len(predictions)==0:
print('Face not found!')
#print(predictions)
# Display results overlaid on an image
#knn.show_prediction_labels_on_image(image_file, predictions)
| [
"[email protected]"
] | |
2ddf079941f4eeee653c7ce2ed639d720e32d599 | 2e10e4f2d5372a82e63377813ff765b876624c30 | /promenade/builder.py | 394e14f7e4861ddd1e536805ad94025d33734eb2 | [
"Apache-2.0"
] | permissive | chnyda/airship-promenade | 6ecdda3def775810733d41c88a4ce0391eaf7739 | 006f1b790772aa7f08852f2409d4c125e1c9f423 | refs/heads/master | 2020-03-20T01:01:11.409606 | 2018-06-20T07:17:36 | 2018-06-20T07:17:36 | 137,064,477 | 0 | 0 | null | 2018-06-12T11:52:41 | 2018-06-12T11:52:41 | null | UTF-8 | Python | false | false | 4,512 | py | from . import logging, renderer
import io
import itertools
import os
import requests
import stat
import tarfile
__all__ = ['Builder']
LOG = logging.getLogger(__name__)
class Builder:
def __init__(self, config, *, validators=False):
self.config = config
self.validators = validators
self._file_cache = None
@property
def file_cache(self):
if not self._file_cache:
self._build_file_cache()
return self._file_cache
def _build_file_cache(self):
self._file_cache = {}
for file_spec in self._file_specs:
path = file_spec['path']
if 'content' in file_spec:
data = file_spec['content']
elif 'tar_url' in file_spec:
data = _fetch_tar_content(
url=file_spec['tar_url'], path=file_spec['tar_path'])
self._file_cache[path] = {
'path': path,
'data': data,
'mode': file_spec['mode'],
}
@property
def _file_specs(self):
return itertools.chain(
self.config.get_path('HostSystem:files', []),
self.config.get_path('Genesis:files', []))
def build_all(self, *, output_dir):
self.build_genesis(output_dir=output_dir)
for node_document in self.config.iterate(
schema='promenade/KubernetesNode/v1'):
self.build_node(node_document, output_dir=output_dir)
if self.validators:
validate_script = renderer.render_template(
self.config, template='scripts/validate-cluster.sh')
_write_script(output_dir, 'validate-cluster.sh', validate_script)
def build_genesis(self, *, output_dir):
LOG.info('Building genesis script')
sub_config = self.config.extract_genesis_config()
tarball = renderer.build_tarball_from_roles(
config=sub_config,
roles=['common', 'genesis'],
file_specs=self.file_cache.values())
script = renderer.render_template(
sub_config,
template='scripts/genesis.sh',
context={
'tarball': tarball
})
_write_script(output_dir, 'genesis.sh', script)
if self.validators:
validate_script = renderer.render_template(
sub_config, template='scripts/validate-genesis.sh')
_write_script(output_dir, 'validate-genesis.sh', validate_script)
def build_node(self, node_document, *, output_dir):
node_name = node_document['metadata']['name']
LOG.info('Building script for node %s', node_name)
script = self.build_node_script(node_name)
_write_script(output_dir, _join_name(node_name), script)
if self.validators:
validate_script = self._build_node_validate_script(node_name)
_write_script(output_dir, 'validate-%s.sh' % node_name,
validate_script)
def build_node_script(self, node_name):
sub_config = self.config.extract_node_config(node_name)
file_spec_paths = [
f['path'] for f in self.config.get_path('HostSystem:files', [])
]
file_specs = [self.file_cache[p] for p in file_spec_paths]
tarball = renderer.build_tarball_from_roles(
config=sub_config, roles=['common', 'join'], file_specs=file_specs)
return renderer.render_template(
sub_config,
template='scripts/join.sh',
context={
'tarball': tarball
})
def _build_node_validate_script(self, node_name):
sub_config = self.config.extract_node_config(node_name)
return renderer.render_template(
sub_config, template='scripts/validate-join.sh')
def _fetch_tar_content(*, url, path):
LOG.debug('Fetching url=%s (tar path=%s)', url, path)
response = requests.get(url)
response.raise_for_status()
LOG.debug('Finished downloading url=%s (tar path=%s)', url, path)
f = io.BytesIO(response.content)
tf = tarfile.open(fileobj=f, mode='r')
buf_reader = tf.extractfile(path)
return buf_reader.read()
def _join_name(node_name):
return 'join-%s.sh' % node_name
def _write_script(output_dir, name, script):
path = os.path.join(output_dir, name)
with open(path, 'w') as f:
f.write(script)
os.chmod(
path,
os.stat(path).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
| [
"[email protected]"
] | |
3aaa90180fa18a62d598af790848d69ced4297d4 | 56be7f6b6a1243c532af9ea98310ccea165a1e66 | /day9/课件/2-并发编程/线程/1.线程.py | 4c93970dcacc26f1ea5fd6bbb63b6619f8af13fe | [] | no_license | 214031230/Python21 | 55b0405ec4ad186b052cde7ebfb3f4bb636a3f30 | d7fc68d3d23345df5bfb09d4a84686c8b49a5ad7 | refs/heads/master | 2021-05-26T06:00:53.393577 | 2019-01-09T02:29:04 | 2019-01-09T02:29:04 | 127,778,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,141 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# What is a process: the smallest unit of resource allocation on a computer
# What is a thread
# The relationship between threads and processes:
#     every process contains at least one thread
# Characteristics of threads in Python
# Characteristics of threads in other languages
# import os
# import time
# from threading import Thread
# n = 100
# def func(i):
# global n
# time.sleep(1)
# n -= 1
# print(os.getpid(),'thread%s'%i)
# t_l = []
# for i in range(100):
# t = Thread(target=func,args=(i,))
# t.start()
# t_l.append(t)
# for t in t_l:t.join()
# print('main : ',n)
# Every process has at least one main thread that is responsible for executing code
# A new thread can be started from within the main thread
# Then two threads are working at the same time inside the same process
# The thread is the smallest unit of CPU scheduling
# Data is shared between the threads of a process
# GIL: the Global Interpreter Lock
#     an issue of the CPython interpreter itself
#     within one process, only one thread can be executed by the CPU at any given moment
#     as a result, CPU-bound code is a poor fit for Python multithreading
#     use multiprocessing or a distributed approach for CPU-bound code
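# Added minimal illustration of the notes above (it mirrors the commented-out
# demo): two threads writing to the same module-level list shows that data is
# shared between threads of one process.
from threading import Thread

shared = []

def _append(i):
    shared.append(i)   # both threads mutate the same object

threads = [Thread(target=_append, args=(i,)) for i in range(2)]
for t in threads: t.start()
for t in threads: t.join()
print('shared :', shared)   # e.g. [0, 1]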
| [
"[email protected]"
] | |
07cde02db4967803b942d8b688c42a2d75a4dfd1 | d8ff8d809fcff5f8370e317d837485648cc6ac9b | /repr_test.py | 7a91cc106da55a3a694d406c14e6e30f848cef3e | [] | no_license | xiabofei/python_details | c9b1ebfdc9574201b8ac21ebd8aa5e0e8442d3de | 1d6950d0fc32997e6f6e6cb269cd1ef4bb233c2f | refs/heads/master | 2020-04-02T06:35:05.659746 | 2019-04-05T06:11:58 | 2019-04-05T06:11:58 | 60,343,232 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 231 | py | #encoding=utf8
"""
werkzeug/routing/
Rule
the __repr__ method
the information that gets printed when you print an object directly
"""
class Test(object):
def __repr__(self):
return "<%s, called>" % (self.__class__.__name__)
t = Test()
print t
| [
"[email protected]"
] | |
f99d9eb488f96ead2b6615f8f842d81f126d62a7 | 1c21fa248091e31c362b95afafc5021211e85e63 | /invensis_pmc/customer/migrations/0011_remove_customer_services_required.py | f754435ba5eeda7264f3ca535292b5a98b54cd81 | [] | no_license | anudeepnaidu95/dev5 | 3d3252a51fccbb794e78a91681708e1b3c1ce0d4 | 7351244b79be242aa2cad36dbe1adca22a744edc | refs/heads/master | 2021-01-20T12:28:07.286078 | 2017-05-05T11:08:37 | 2017-05-05T11:08:37 | 90,365,863 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-14 14:19
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('customer', '0010_auto_20160714_1930'),
]
operations = [
migrations.RemoveField(
model_name='customer',
name='services_required',
),
]
| [
"[email protected]"
] | |
28d0f572052e6a3a1f08040ed8fa0e520b19b7e6 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.2/tests/regressiontests/forms/localflavor/__init__.py | dc478b681fdfd63cab664cfadf345bae3969f039 | [] | no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.2/tests/regressiontests/forms/localflavor/__init__.py | [
"[email protected]"
] | |
acd99f69d096e095274ab74784eeb2d609a3a1d9 | 2b6df7c9f1ffbda9d46eda14a62010dac6cfe6da | /app/utils.py | 9802bc03b16c95b262732b9479c811f1203dca51 | [] | no_license | its-arpit/tranageapp | 355e03a362fe14f2cd992b4fa3021806bc4cc4e9 | 657859135f492cb0f58b532671ee799060aa5afa | refs/heads/master | 2023-06-04T12:54:53.956808 | 2021-06-11T16:10:23 | 2021-06-11T16:10:23 | 376,124,298 | 0 | 0 | null | 2021-06-11T19:41:13 | 2021-06-11T19:30:24 | null | UTF-8 | Python | false | false | 583 | py | from django.shortcuts import render
from django.http import HttpResponse
from django.core.mail import send_mail
import math, random
def generateOTP() :
digits = "0123456789"
OTP = ""
for i in range(4) :
OTP += digits[math.floor(random.random() * 10)]
return OTP
def send_email_otp(request):
    email = request.GET.get("email")
    print(email)
    o = generateOTP()
    htmlgen = '<p>Your OTP is <strong>{}</strong></p>'.format(o)  # interpolate the OTP into the HTML body
    send_mail('OTP request', o, '<your gmail id>', [email], fail_silently=False, html_message=htmlgen)
return HttpResponse(o) | [
"[email protected]"
] | |
f4acc1b6983de406da0a4d2d27544abda966e6da | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/learnbgame_hops/operators/misc/triangulate_ngons.py | 392d2147834108f25c1e15670053e2c33eba33e5 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,310 | py | import bpy
class HOPS_OT_TriangulateNgons(bpy.types.Operator):
bl_idname = "hops.triangulate_ngons"
bl_label = "triangulate ngons"
bl_description = "triangulate ngons"
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
def execute(self, context):
bpy.ops.object.convert(target='MESH')
for obj in bpy.context.selected_objects:
bpy.context.view_layer.objects.active = obj
bpy.ops.object.editmode_toggle()
bpy.ops.mesh.select_all(action='DESELECT')
bpy.ops.mesh.select_face_by_sides(number=4, type='GREATER')
bpy.ops.mesh.quads_convert_to_tris(quad_method='BEAUTY', ngon_method='BEAUTY')
bpy.ops.object.editmode_toggle()
return {"FINISHED"}
class HOPS_OT_TriangulateModifier(bpy.types.Operator):
bl_idname = "hops.triangulate_mod"
bl_label = "triangulate mod"
bl_description = "triangulate mod"
bl_options = {"REGISTER"}
@classmethod
def poll(cls, context):
return True
def execute(self, context):
selected = context.selected_objects
for obj in selected:
obj.modifiers.new(name="Triangulate", type="TRIANGULATE")
return {"FINISHED"}
| [
"[email protected]"
] | |
681d91cef8af005ef2529196c8d13b4eddf0314d | dc39ccc50b7d34e5de84f3cc132c5cc096a32656 | /Sanic/4-NamedURLRedirect/main.py | 035aee847b4d83b28db14dd020c1e8de99eb6971 | [] | no_license | Shukladas1115/Python | 0947aefd62a9ce4c3140360cb7259b031368709c | feb32bc2e2e7df377fc2d92330bfdacb83f31a55 | refs/heads/master | 2022-02-20T04:15:56.036495 | 2019-08-26T16:36:52 | 2019-08-26T16:36:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | from sanic import Sanic
from sanic import response
app = Sanic(__name__)
@app.route('/')
async def index(request):
# generate a URL for the endpoint `post_handler`
url = app.url_for('post_handler', post_id=5)
# the URL is `/posts/5`, redirect to it
return response.redirect(url)
@app.route('/posts/<post_id>')
async def post_handler(request, post_id):
return response.text('Post - {}'.format(post_id))
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000, debug=True) | [
"[email protected]"
] | |
892a164d926d0ef0796ff7c322ef22178f33cb1e | 3f16e66b33b39df8866947fcf1d8249476725c03 | /mymodule/test1/file1.py | 051436656cfe2394e1be22e4365ad29c246bff90 | [] | no_license | VadimVolynkin/learning_python3 | ea3559e0f01b4c9e09ae82b76ca315de8e41ecc4 | 872f0a2ac296ec3242ac9b81f63a29f09bc614fa | refs/heads/main | 2023-08-10T05:39:04.376172 | 2021-09-07T14:44:20 | 2021-09-07T14:44:20 | 339,133,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | print('hello from file1.py')
a1 = 'its a1 from file1'
def __a2():
a2 = 'its a2 from file1'
return a2
| [
"[email protected]"
] | |
5a7c1fe7ce5663916fe48e08b1c9a759329dca0c | 9bdd421f0bd5cb30a0429e11b23bd85ed34b006a | /account/views.py | 019f9c68589e98f18cf1df20bd508d7642623be2 | [] | no_license | MrAch26/yugioh_django_proj | 8f0f0fbf0cb6e4ec4fac8757a7236fbb08099689 | 9cd363a3ab9019c92973454dab5eb812894c4c37 | refs/heads/main | 2023-03-27T21:53:45.240113 | 2020-10-25T07:39:22 | 2020-10-25T07:39:22 | 305,049,382 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,289 | py | from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.shortcuts import render, redirect
from django.urls import reverse, reverse_lazy
from django.views.generic import CreateView, UpdateView, DetailView
from account.forms import UserSignupForm, ProfileViewForm
from account.models import Profile
from trading_cards.models import Card, Trade
class UserSignUp(CreateView):
template_name = "registration/signup.html"
model = User
form_class = UserSignupForm
success_url = 'home'
failed_message = "The User couldn't be added"
def form_valid(self, form):
user_to_add = form.cleaned_data
# check the data we get when the form is valid
print("user_to_add", user_to_add)
super(UserSignUp, self).form_valid(form)
# inherit from ModelFormMixin : form_valid(form)
# Saves the form instance, sets the current object for the view,
# and redirects to get_success_url().
print("---------form valid")
# The form is valid, automatically sign-in the user
user = authenticate(self.request, username=form.cleaned_data['username'],
password=form.cleaned_data['password1'])
if user is None:
print("---------user none")
# User not validated for some reason, return standard form_valid() response
# Inherit from TemplateResponseMixin :
# render_to_response(context, **response_kwargs)¶
return self.render_to_response(
self.get_context_data(form=form,
failed_message=self.failed_message))
else:
print("-----------user good")
# Log the user in
login(self.request, user)
# Redirect to success url
return redirect(reverse(self.get_success_url()))
class ProfileView(UpdateView):
model = Profile
template_name = 'profile.html'
form_class = ProfileViewForm
success_url = reverse_lazy('home')
def my_deck(request):
trade = Trade.objects.all()
return render(request, 'my_deck.html', {'trade': trade})
class MyCard(DetailView):
model = Card
# todo: add details view for deck if relevant Maybe if time
| [
"[email protected]"
] | |
68f3fb9a96aa5c00e2fb8dedab67d2f23725c127 | edb88981aa1420af7e074068ed7818b9d904a3dd | /trunk/minds/test/test_app_httpserver.py | 91a8c2880f3ee0c0508f61d743a9000402944147 | [] | no_license | BackupTheBerlios/mindretrieve-svn | 101c0f1dfc25d20d5f828b6fd0d43301b773af4e | 463745fcf1c1d5b1f6c201c30bcc339c99b437ed | refs/heads/master | 2021-01-22T13:57:31.225772 | 2006-04-28T04:24:43 | 2006-04-28T04:24:43 | 40,801,743 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,765 | py | """
"""
import StringIO
import unittest
from minds.safe_config import cfg as testcfg
from minds import app_httpserver
class AppHTTPRequestHandlerFixture(app_httpserver.AppHTTPRequestHandler):
def __init__(self):
pass
class TestAppHTTPRequestHandler(unittest.TestCase):
def _test_lookup(self, url, expected):
handler = AppHTTPRequestHandlerFixture()
self.assertEqual(handler._lookup_cgi(url), expected)
def test_lookup_cgi(self):
from minds.cgibin import history
from minds.cgibin import weblib
from minds.cgibin import weblibMultiForm
self._test_lookup('', (weblib, '/', '', ''))
self._test_lookup('/', (weblib, '/', '', ''))
self._test_lookup('/history/item?1', (history, '/history', '/item', '1'))
self._test_lookup('/weblib/multiform/100', (weblibMultiForm, '/weblib/multiform', '/100', ''))
class TestMisc(unittest.TestCase):
def test_convertPath2Module1(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'./minds\admin\tmpl/home.html'),
('minds.admin.tmpl.home','home'),
)
def test_convertPath2Module2(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'./minds\admin\snoop'),
('minds.admin.snoop','snoop'),
)
def test_convertPath2Module3(self):
self.assertEqual(
app_httpserver._convertPath2Module(r'/minds/admin/snoop.py'),
('minds.admin.snoop','snoop'),
)
class TestCGIFileFilter(unittest.TestCase):
DATA1 = """date:04/19/05\r
\r
line1
line2
"""
DATA2 = """line3
line4"""
def setUp(self):
self.buf = StringIO.StringIO()
self.fp = app_httpserver.CGIFileFilter(self.buf)
def test1(self):
self.fp.write('\r\n\r\n')
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 200 OK\r\n\r\n\r\n')
def test_nodirective(self):
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 200 OK\r\n' +
self.DATA1 + self.DATA2)
def test_status(self):
self.fp.write('404 not found\r\n')
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(), 'HTTP/1.0 404 not found\r\n'
+ self.DATA1 + self.DATA2)
def test_location(self):
self.fp.write('loCATion : http://abc.com/index.html\r\n')
self.fp.write(self.DATA1)
self.fp.write(self.DATA2)
self.fp.flush()
self.assertEqual(self.buf.getvalue(),
"""HTTP/1.0 302 Found\r
loCATion : http://abc.com/index.html\r
""" + \
self.DATA1 + self.DATA2)
def test_states(self):
# verify CGIFileFilter has gone through each state
self.assertEqual(self.fp.state, self.fp.INIT)
self.fp.write('200 ok\r\n\r\n')
self.assertEqual(self.fp.state, self.fp.BUFFER)
self.fp.write('.'*(self.fp.MAX_BUFFER+1))
self.assertEqual(self.fp.state, self.fp.SENT)
buf_size = len(self.buf.getvalue())
self.assert_(buf_size > self.fp.MAX_BUFFER+1) # some HTTP info + content
# still accepting output at SENT state
self.fp.write('.')
self.assertEqual(len(self.buf.getvalue()), buf_size+1)
def test_buffer(self):
# verify data is buffered until flush
self.fp.write('200 ok\r\n\r\n')
self.fp.write('.')
self.assertEqual(len(self.buf.getvalue()), 0)
self.fp.flush()
self.assert_(len(self.buf.getvalue()) > 0)
if __name__ == '__main__':
unittest.main() | [
"tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990"
] | tungwaiyip@785ff9d5-dded-0310-b5f2-a5aff206d990 |
3668b4615b62655571841c3fe2962d8a50e0b33f | 1b5ab3f252069181b5e07d4d6d177ab82e942e51 | /Homework3/Part 1/tt1.py | e219c5bb06a667aa68f443395c6215ac7c9e253b | [] | no_license | phamhailongg/C4T9 | 59214081224f37b356e209d57f0865632dccc8f6 | c400005012fb349c1388dd92c8e590322bb203e4 | refs/heads/master | 2021-07-06T11:10:05.283974 | 2019-05-05T21:46:04 | 2019-05-05T21:46:04 | 152,599,080 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | from turtle import *
speed(10)
color("red")
for i in range(4):
lt(120)
fd(100)
lt(-60)
fd(100)
lt(-120)
fd(100)
lt(-60)
fd(100)
lt(30)
mainloop() | [
"[email protected]"
] | |
f7a0a2fa0e865a49765a53208422402c335ba849 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_discomposing.py | bafc1a64c9a8785f9dbb06454a7311b879d585bc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 240 | py |
#calss header
class _DISCOMPOSING():
def __init__(self,):
self.name = "DISCOMPOSING"
self.definitions = discompose
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['discompose']
| [
"[email protected]"
] | |
dd9ea44609ed4c96315ef9e7285fbe5f871730ce | 30bd7e8abe0a15fbb8f1b1e4a3a9a15a3ad124a9 | /romans/src/utils/roman.py | c2a15aca81d4265a81dc04ee59ee231d8b7b4fca | [] | no_license | TiagoArrazi/Romans | c96cac19a36e5e89ea719b084693b2af0f6e1cf2 | f2841931fb9b7428acdc4604dae0535508002781 | refs/heads/master | 2020-07-12T01:53:44.220206 | 2019-08-27T12:31:34 | 2019-08-27T12:31:34 | 204,688,868 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,057 | py | from resources.symbols import symbols
class Roman:
@classmethod
def make_it_roman(cls, number):
if 900 <= int(number) <= 3000:
mult = divmod(int(number), 1000)
if mult[0] > 0 and mult[1] == 0:
return symbols["1000"] * mult[0]
c_amount = (1000 - int(number)) // 100
if c_amount > 0:
return f"{symbols['100']}{symbols['1000']}"
if c_amount < 0:
return f"{symbols['1000']}{abs(c_amount) * symbols['100']}"
elif 400 <= int(number) <= 800:
if number == "500":
return symbols["500"]
c_amount = (500 - int(number)) // 100
if c_amount > 0:
return f"{symbols['100']}{symbols['500']}"
if c_amount < 0:
return f"{symbols['500']}{abs(c_amount) * symbols['100']}"
elif 90 <= int(number) <= 300:
mult = divmod(int(number), 100)
if mult[0] > 0 and mult[1] == 0:
return symbols["100"] * mult[0]
c_amount = (100 - int(number)) // 10
if c_amount > 0:
return f"{symbols['10']}{symbols['100']}"
if c_amount < 0:
return f"{symbols['100']}{abs(c_amount) * symbols['10']}"
elif 40 <= int(number) <= 80:
if number == "50":
return symbols["50"]
c_amount = (50 - int(number)) // 10
if c_amount > 0:
return f"{symbols['10']}{symbols['50']}"
if c_amount < 0:
return f"{symbols['50']}{abs(c_amount) * symbols['10']}"
elif 9 <= int(number) <= 30:
mult = divmod(int(number), 10)
if mult[0] > 0 and mult[1] == 0:
return symbols["10"] * mult[0]
c_amount = (10 - int(number))
if c_amount > 0:
return f"{symbols['1']}{symbols['10']}"
if c_amount < 0:
return f"{symbols['10']}{abs(c_amount) * symbols['1']}"
elif 4 <= int(number) <= 8:
if number == "5":
return symbols["5"]
c_amount = (5 - int(number))
if c_amount > 0:
return f"{symbols['1']}{symbols['5']}"
if c_amount < 0:
return f"{symbols['5']}{abs(c_amount) * symbols['1']}"
else:
return int(number) * symbols["1"]
@classmethod
def convert_digits(cls, number):
try:
if 1 <= int(number) <= 3000:
strip_number_list = [(10 ** index) // 10 * int(n)
for index, n
in zip(range(len(number), 0, -1), number)]
converted_number_list = list()
for item in strip_number_list:
converted_number_list.append(cls.make_it_roman(str(item)))
return ''.join(converted_number_list)
except ValueError:
return False
return False
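# Added usage sketch (assumes resources/symbols.py maps "1", "5", "10", "50",
# "100", "500" and "1000" to the usual numerals I, V, X, L, C, D, M):
if __name__ == "__main__":
    print(Roman.convert_digits("2019"))   # expected "MMXIX" with that mapping
    print(Roman.convert_digits("abc"))    # non-numeric input returns False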
| [
"[email protected]"
] | |
f14c49b90c661b6ac6e514e6ecfda425e0621418 | 17f2ea360d2cc77ff45ab7b61f0e03d3c9d96247 | /Stock/Trade/AccountManager/StopMode/DyStockStopLossMaMode.py | e53c9179190315d960e3820c1281b459be2d9596 | [
"MIT"
] | permissive | yutiansut/DevilYuan | 89aba2728d42a686cf989b74283a5197edfe6b49 | 6467f8c33c4692d3616f0eb0b0bf974d16e95836 | refs/heads/master | 2020-03-19T18:43:11.674992 | 2018-06-11T03:17:32 | 2018-06-11T03:17:32 | 136,821,539 | 2 | 1 | MIT | 2018-06-11T03:17:33 | 2018-06-10T15:37:24 | Python | UTF-8 | Python | false | false | 2,409 | py | from .DyStockStopMode import *
from ...DyStockTradeCommon import *
class DyStockStopLossMaMode(DyStockStopMode):
stopLossPnlRatio = -5
def __init__(self, accountManager, dataEngine, ma):
super().__init__(accountManager)
self._dataEngine = dataEngine
self._daysEngine = self._dataEngine.daysEngine
self._ma = ma
self._tradeStartTime = '14:55:00'
self._curInit()
def _curInit(self):
self._preparedData = {}
def onOpen(self, date):
self._curInit()
preDate = self._daysEngine.tDaysOffsetInDb(date, -1)
for code in self._accountManager.curPos:
if not self._daysEngine.loadCode(code, [preDate, -self._ma+2], latestAdjFactorInDb=False):
return False
df = self._daysEngine.getDataFrame(code)
if df.shape[0] != (self._ma - 1): return False
self._preparedData[code] = df['close'].values.tolist()
return True
def _processAdj(self, code, tick):
""" 处理除复权 """
if tick.preClose is None: return
if code not in self._preparedData: return False
if code not in self._accountManager.curPos: return False
closes = self._preparedData[code]
if tick.preClose == closes[-1]:
return True
        # adjust the cached closes by the adjustment factor
adjFactor = tick.preClose/closes[-1]
        # prices
closes = list(map(lambda x,y:x*y, closes, [adjFactor]*len(closes)))
        closes[-1] = tick.preClose  # guard against floating point precision issues
self._preparedData[code] = closes
return True
def _stopLoss(self, code, tick):
ma = (sum(self._preparedData[code]) + tick.price)/self._ma
pos = self._accountManager.curPos[code]
if tick.price < ma and pos.pnlRatio < self.stopLossPnlRatio:
self._accountManager.closePos(tick.datetime, code, getattr(tick, DyStockTradeCommon.sellPrice), DyStockSellReason.stopLoss)
def onTicks(self, ticks):
for code, pos in self._accountManager.curPos.items():
tick = ticks.get(code)
if tick is None:
continue
if tick.time < self._tradeStartTime:
return
if not self._processAdj(code, tick):
continue
self._stopLoss(code, tick)
def onBars(self, bars):
self.onTicks(bars) | [
"[email protected]"
] | |
05ff5f5a599c92b2f689b4a53313597783b6caef | 727cdc7c9af6fdf6b4eb8444197718e5c6760019 | /asin_keyword/cookie_sele_local.py | f538225e776bc68fc0a60a43531ef76c1f359afa | [] | no_license | newer027/amazon_crawler | 0cc6feb30f9180ae48ac936eeb6af41ec06eadfd | 39d6867a8dd56b90dae5e98aa44e6df274439f8e | refs/heads/master | 2022-11-23T17:04:33.995126 | 2020-04-03T15:42:42 | 2020-04-03T15:42:42 | 252,774,253 | 1 | 0 | null | 2022-11-22T01:44:53 | 2020-04-03T15:42:31 | CSS | UTF-8 | Python | false | false | 4,831 | py | import time, pickle
from selenium import webdriver
from .validation import validation, validation_jp
from PIL import Image
from pytesseract import image_to_string
from random import *
from urllib.request import urlretrieve
from bs4 import BeautifulSoup
#from .emails import send_email
from pyvirtualdisplay import Display
import requests, shutil
def get_captcha(driver, element, path):
# now that we have the preliminary stuff out of the way time to get that image :D
location = element.location
size = element.size
# saves screenshot of entire page
driver.save_screenshot(path)
# uses PIL library to open image in memory
image = Image.open(path)
#image.show()
left = location['x']
top = location['y']
right = location['x'] + size['width']
bottom = location['y'] + size['height']
print(left, top, right, bottom)
# image = image.crop((left, top, right, bottom)) # defines crop points
image = image.crop((left*2, top*2, right*2, bottom*2)) # defines crop points
image.save(path, 'jpeg') # saves new cropped image
def validate(driver,country):
im = driver.find_element_by_id('auth-captcha-image')
# im = im.get_attribute('src')
# urlretrieve(im, "captcha.jpeg")
get_captcha(driver,im,"captcha.jpeg")
"""
agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:52.0) Gecko/20100101 Firefox/52.0'
headers = {
'User-Agent': agent,
'Host': "opfcaptcha-prod.s3.amazonaws.com",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Encoding": "gzip, deflate, sdch, br",
"Accept-Language": "zh-CN,zh;q=0.8",
"Connection": "keep-alive"
}
print(im)
response = requests.get(im, stream=True, headers=headers)
with open('captcha.jpeg', 'wb') as out_file:
shutil.copyfileobj(response.raw, out_file)
del response
"""
im = 'captcha.jpeg'
im = Image.open(im)
im = im.convert('L')
def initTable(threshold=140):
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
return table
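    # The table above maps gray levels below the threshold to 0 and the rest to 1,
    # so im.point(initTable(), '1') below yields a binarized (1-bit) image, which
    # generally helps Tesseract on noisy captchas; 140 is an empirical threshold.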
binaryImage = im.point(initTable(), '1')
binaryImage.show()
characters = image_to_string(binaryImage,config='-psm 7')
char_list = characters.split(' ')
characters = ''.join(char_list)
print(characters)
search_box = driver.find_element_by_id('ap_password')
if country=='jp':
for i in validation_jp['password']:
time.sleep(0.8-random()*0.5)
search_box.send_keys(i)
else:
for i in validation['password']:
time.sleep(0.8-random()*0.5)
search_box.send_keys(i)
time.sleep(4)
search_box = driver.find_element_by_id('auth-captcha-guess')
for i in characters:
time.sleep(0.5-random()*0.2)
search_box.send_keys(i)
time.sleep(3)
driver.find_element_by_id('signInSubmit').click()
# amazon = AmazonAPI('AKIAJ2TPWCFJMKXPSJVQ','ixmfea5B2xKFukyuR/aiBzkI6f+umvISvYlzzBBy','newer027-20')
# asin="B01LCDJ7LG"
# ean='0848061039726'
# product = amazon.lookup(ItemId=asin)
# ean = product.ean
# print(ean)
# driver = webdriver.Chrome("/Users/Jacob/Desktop/amazon_keyword/chromedriver")
def cookie_sele(country):
# display = Display(visible=0, size=(1920, 1080)).start()
# driver = webdriver.Firefox()
product_url_am = "https://vendorexpress.amazon.com/home?ref=VE_LANDING"
product_url_eu = "https://vendorexpress.amazon.eu/home?ref=VE_LANDING"
product_url_jp = "https://vendorexpress.amazon.co.jp/home?ref=VE_LANDING"
driver = webdriver.Chrome("/Users/Jacob/Desktop/amazon_keyword/chromedriver")
try:
if country=='am':
driver.get(product_url_am)
start_url="https://vendorexpress.amazon.com/ap/signin"
cookies="cookies_am.pkl"
elif country=='eu':
driver.get(product_url_eu)
start_url="https://vendorexpress.amazon.eu/ap/signin"
cookies="cookies_eu.pkl"
else:
driver.get(product_url_jp)
start_url="https://vendorexpress.amazon.co.jp/ap/signin"
cookies="cookies_jp.pkl"
search_box = driver.find_element_by_id('ap_email')
if country=='jp':
search_box.send_keys(validation_jp['id'])
else:
search_box.send_keys(validation['id'])
time.sleep(5)
while driver.current_url.startswith(start_url):
validate(driver,country)
time.sleep(15)
html = driver.page_source
soup = BeautifulSoup(html, "lxml")
print(soup.title.string)
pickle.dump( driver.get_cookies() , open(cookies,"wb"))
finally:
driver.quit()
#display.stop()
| [
"[email protected]"
] | |
3d7e4e20678a99f2171c8af491263ebaaf9b1f39 | 34a5921552537d96d9680f88b94be1706e5c8f1a | /facets/common/consts.py | a69b16a8e80bc022fd6cd90a8ebb69d306714fae | [
"Apache-2.0"
] | permissive | hunterhector/DDSemantics | 11f1a85486349627036626d3b638db39f70030fe | 65235d8897bce403e5d628ed912e516b28254c74 | refs/heads/master | 2023-07-13T05:20:13.211363 | 2023-06-21T21:44:37 | 2023-06-21T21:44:37 | 123,484,643 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 150 | py | """Define constants"""
MENTION_START_TOKEN = "[unused1]"
MENTION_END_TOKEN = "[unused2]"
HEADER_END_TOKEN = "[unused3]"
CLS = "[CLS]"
SEP = "[SEP]"
| [
"[email protected]"
] | |
2897a346fb526a6e0e57f8e45f21e07c4f5a4bb0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02267/s584392095.py | 6a5daa87a557535ac58fe4e73ddfee745deeec4f | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 166 | py | n = int(input())
s = [int(x) for x in input().split()]
q = int(input())
t = [int(x) for x in input().split()]
cnt = 0
for x in t:
if x in s:
cnt += 1
print(cnt) | [
"[email protected]"
] | |
57ca134de961ddf6a494d2abcf622a29832b057d | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-2/07acc579db839170122fc66505a886ef023d5f4f-<execute_install>-bug.py | 46e7feb105b1c13945aa837f5c23514abfcee9e9 | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,454 | py |
def execute_install(self):
'\n uses the args list of roles to be installed, unless -f was specified. The list of roles\n can be a name (which will be downloaded via the galaxy API and github), or it can be a local .tar.gz file.\n '
role_file = self.options.role_file
if ((len(self.args) == 0) and (role_file is None)):
raise AnsibleOptionsError('- you must specify a user/role name or a roles file')
no_deps = self.options.no_deps
force = self.options.force
roles_left = []
if role_file:
try:
f = open(role_file, 'r')
if (role_file.endswith('.yaml') or role_file.endswith('.yml')):
try:
required_roles = yaml.safe_load(f.read())
except Exception as e:
raise AnsibleError(('Unable to load data from the requirements file: %s' % role_file))
if (required_roles is None):
raise AnsibleError(('No roles found in file: %s' % role_file))
for role in required_roles:
if ('include' not in role):
role = RoleRequirement.role_yaml_parse(role)
display.vvv(('found role %s in yaml file' % str(role)))
if (('name' not in role) and ('scm' not in role)):
raise AnsibleError('Must specify name or src for role')
roles_left.append(GalaxyRole(self.galaxy, **role))
else:
with open(role['include']) as f_include:
try:
roles_left += [GalaxyRole(self.galaxy, **r) for r in (RoleRequirement.role_yaml_parse(i) for i in yaml.safe_load(f_include))]
except Exception as e:
msg = 'Unable to load data from the include requirements file: %s %s'
raise AnsibleError((msg % (role_file, e)))
else:
display.deprecated('going forward only the yaml format will be supported', version='2.6')
for rline in f.readlines():
if (rline.startswith('#') or (rline.strip() == '')):
continue
display.debug(('found role %s in text file' % str(rline)))
role = RoleRequirement.role_yaml_parse(rline.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
f.close()
except (IOError, OSError) as e:
raise AnsibleError(('Unable to open %s: %s' % (role_file, str(e))))
else:
for rname in self.args:
role = RoleRequirement.role_yaml_parse(rname.strip())
roles_left.append(GalaxyRole(self.galaxy, **role))
for role in roles_left:
if (role_file and self.args and (role.name not in self.args)):
display.vvv(('Skipping role %s' % role.name))
continue
display.vvv(('Processing role %s ' % role.name))
if (role.install_info is not None):
if ((role.install_info['version'] != role.version) or force):
if force:
display.display(('- changing role %s from %s to %s' % (role.name, role.install_info['version'], (role.version or 'unspecified'))))
role.remove()
else:
display.warning(('- %s (%s) is already installed - use --force to change version to %s' % (role.name, role.install_info['version'], (role.version or 'unspecified'))))
continue
elif (not force):
display.display(('- %s is already installed, skipping.' % str(role)))
continue
try:
installed = role.install()
except AnsibleError as e:
display.warning(('- %s was NOT installed successfully: %s ' % (role.name, str(e))))
self.exit_without_ignore()
continue
if ((not no_deps) and installed):
role_dependencies = (role.metadata.get('dependencies') or [])
for dep in role_dependencies:
display.debug(('Installing dep %s' % dep))
dep_req = RoleRequirement()
dep_info = dep_req.role_yaml_parse(dep)
dep_role = GalaxyRole(self.galaxy, **dep_info)
if (('.' not in dep_role.name) and ('.' not in dep_role.src) and (dep_role.scm is None)):
continue
if (dep_role.install_info is None):
if (dep_role not in roles_left):
display.display(('- adding dependency: %s' % str(dep_role)))
roles_left.append(dep_role)
else:
display.display(('- dependency %s already pending installation.' % dep_role.name))
elif (dep_role.install_info['version'] != dep_role.version):
display.warning(('- dependency %s from role %s differs from already installed version (%s), skipping' % (str(dep_role), role.name, dep_role.install_info['version'])))
else:
display.display(('- dependency %s is already installed, skipping.' % dep_role.name))
if (not installed):
display.warning(('- %s was NOT installed successfully.' % role.name))
self.exit_without_ignore()
return 0
| [
"[email protected]"
] | |
e3f8022857e30db6341da4c8f88e97f4e7063f57 | 6d8faae66dd6332836bb11d7f02d6867c95d2a65 | /glast/pointlike/python/uw/like/likelihood_fit.py | 7dec093d277b975937d18b773ffedc0d0b11c596 | [] | no_license | Areustle/fermi-glast | 9085f32f732bec6bf33079ce8e2ea2a0374d0228 | c51b821522a5521af253973fdd080e304fae88cc | refs/heads/master | 2021-01-01T16:04:44.289772 | 2017-09-12T16:35:52 | 2017-09-12T16:35:52 | 97,769,090 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,382 | py | """A module providing functionality for parametrizing a likelihood curve
by a simple function.
Classes:
LogLikelihood: a representation of a likelihood curve
Authors: Eric Wallace, Matthew Kerr
"""
__version__ = "$Revision: 1.1.2.1 $"
#$Header: /glast/ScienceTools/glast/pointlike/python/uw/like/Attic/likelihood_fit.py,v 1.1.2.1 2015/08/13 18:03:08 jasercio Exp $
import numpy as np
import scipy.optimize as opt
class LogLikelihood(object):
"""A representation of a log likelihood curve by a Poisson-like function
The representation used here follows the approach of Nolan, et al. The
likelihood is represented by a three-parameter function of a form similar
to the Poisson distribution PMF. The peak is found by maximizing the
provided log likelihood function. The parameters are then found by a least
squares fit using the peak, two points equispaced around it, and zero.
The parametrizing function is
f(s) = logL - logL_max = n*np.log(e*(s+b)) - e*(s+b) - n*np.log(n) + n
with n = e*(s_peak+b)
"""
def __init__(self,loglike,initial_value=1e-10,fit_max = True,pars=None):
"""Create a LogLikelihood instance.
loglike: The log likelihood function to be parametrized. Should be a
callable with one argument.
initial_value: An initial guess at the maximum of the provided
function. The default of 1e-10 should be a reasonable guess for
the normalization of a PowerLaw model.
fit_max: Whether to use fmin to maximize the log likelihood. If False
initial_value will be taken as the position of the maximum of the
log likelihood, so this should only be set to False if the value
passed as initial_value is the result of a previous maximization
of the provided function.
pars: A length three sequence providing the values for the parameters
of the fit function: s_peak,e, and b. If provided, these values
will be used and the loglike argument will be ignored.
"""
self.function = self._setup_function(loglike)
self.saved_points = np.array([])
self.saved_values = np.array([])
if pars is not None:
try:
assert(hasattr(pars,'__iter__') and len(pars)==3)
self.pars = pars
except AssertionError:
print('Keyword argument pars must be a sequence of length 3.')
print('Will attempt to derive parameters from provided function')
self.pars = self._find_pars(initial_value,fit_max = fit_max)
else:
self.pars = self._find_pars(initial_value,fit_max = fit_max)
self._check_agreement()
def _setup_function(self,function):
"""Setup caching of values passed to the log likelihood function."""
def _function(x):
if x in self.saved_points:
ll = self.saved_values[self.saved_points==x][0]
else:
ll = function(x)
self.saved_points = np.append(self.saved_points,x)
self.saved_values = np.append(self.saved_values,ll)
return ll
return _function
def _find_pars(self,initial_value,fit_max = False):
"""Find the best fit parameters for the fit function"""
if fit_max:
self.mode = opt.fmin(lambda x: -self.function(x),initial_value)[0]
else:
self.mode = initial_value
self.max = self.function(self.mode)
#xs = np.array([0,max/2,max,max*2])
#ys = np.array([self.function(x) for x in xs])
xs = self.saved_points.copy()
ys = self.saved_values.copy()
ys = ys - ys.max()
return opt.leastsq(lambda x:self._poisson(x,xs)-ys,np.array([self.mode,10/self.mode,xs[-1]]),maxfev=5000)[0]
def _poisson(self,pars,s):
"""Calculate the value of the parametrizing function for some parameters.
pars: A sequence of length 3 providing the parameters s_peak, e, and b.
s: The point at which to evaluate the function. Can be a numpy array.
"""
if pars[0]<0: return -1e10
s_peak,e,b = pars[0],pars[1],pars[2];n = e*(s_peak+b)
#logL - logL_max = n*np.log(e*(s+b))-e*(s+b) - n*np.log(e*(s_peak+b))+e*(s_peak+b)
#simplified:
return n*np.log((s+b)/(s_peak+b)) + e*(s_peak-s)
def __call__(self,x):
"""Return the value of the parametrizing function at point x."""
return self._poisson(self.pars,x) + self.max
def find_logl_change(self,initial_value,delta_logl):
"""Find the points where the likelihood has decreased by delta_logl.
Returns a tuple of the (low, high) values. If the likelihood at zero
differs from the max by less than the specified change, return zero
for the lower value.
"""
#First, find lower value
lo = 1e-20 #basically zero
hi = initial_value
ll_0 = self.function(hi)
if ll_0-self.function(lo)>delta_logl:
for i in xrange(20):
avg = .5*(hi+lo)
ll = self.function(avg)
if ll_0-ll<delta_logl: hi = avg
else: lo = avg
if abs(ll_0-ll-delta_logl)<.01: break
lo_val = avg
else: lo_val = lo
#Now the upper value
lo = initial_value
hi = initial_value*10
while ll_0-self.function(hi)<delta_logl: hi+=1
for i in xrange(20):
avg = .5*(lo+hi)
ll = self.function(avg)
if ll_0-ll<delta_logl: lo = avg
else: hi = avg
if abs(ll_0-ll-delta_logl)<.01: break
hi_val = avg
return (lo_val,hi_val)
def _check_agreement(self):
lo,hi = self.find_logl_change(self.mode,10)
lo_ll,hi_ll = self.function(lo),self.function(hi)
lo_val,hi_val = self(lo),self(hi)
        if abs(1-lo_ll/lo_val) > .05:
            print("Warning: fit function differs from log likelihood by {0:.02}% in the low tail".format((1-lo_ll/lo_val)*100))
        if abs(1-hi_ll/hi_val) > .05:
            print("Warning: fit function differs from log likelihood by {0:.02}% in the high tail".format((1-hi_ll/hi_val)*100))
def ts(self):
return self(self.mode)-self(0)
| [
"[email protected]"
] | |
a95c88307396ee0164e6f263644fc07b185a3d85 | 1089bc72856fe3ef0edd4b17b2f07b8ec5de8e14 | /firecares/settings/base.py | 1090ad22ec43635bb3cf9e8ae536318c7280b299 | [
"MIT"
] | permissive | JWileczek/firecares | e521c9d9f829fc60f13c2d051be89b5feadb5fc0 | dd82e6e720cdaaf0bacd7a2cc51669341a29ffae | refs/heads/master | 2020-12-25T12:41:04.124970 | 2015-08-30T15:38:56 | 2015-08-30T15:38:56 | 41,690,086 | 0 | 0 | null | 2015-08-31T17:29:12 | 2015-08-31T17:29:12 | null | UTF-8 | Python | false | false | 7,905 | py | import os
from kombu import Queue
DEBUG = True
TEMPLATE_DEBUG = DEBUG
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.getenv('DATABASE_NAME', 'firecares'),
'USER': os.getenv('DATABASE_USER', 'firecares'),
'PASSWORD': os.getenv('DATABASE_PASSWORD', 'password'),
'HOST': os.getenv('DATABASE_HOST', 'localhost'),
'PORT': os.getenv('DATABASE_PORT', '5432'),
},
'nfirs': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': os.getenv('NFIRS_DATABASE_NAME', 'nfirs'),
'USER': os.getenv('NFIRS_DATABASE_USER', 'firecares'),
'PASSWORD': os.getenv('NFIRS_DATABASE_PASSWORD', 'password'),
'PORT': os.getenv('NFIRS_DATABASE_PORT', '5432'),
'HOST': os.getenv('NFIRS_DATABASE_HOST', 'localhost'),
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = '/var/www/firecares/media/'
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/var/www/firecares/static/'
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = '$keb7sv^%c+_7+94u6_!lc3%a-3ima9eh!xyj%$xa8yibv&ogr'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
TEMPLATE_CONTEXT_PROCESSORS = (
"django.core.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.i18n",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"firecares.firecares_core.context_processors.third_party_tracking_ids",
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'firecares.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'firecares.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(PROJECT_ROOT, 'templates'),
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'autocomplete_light',
'django.contrib.admin',
'django.contrib.gis',
'django.contrib.humanize',
'firecares.firecares_core',
'firecares.firestation',
'firecares.usgs',
'jsonfield',
'compressor',
'storages',
'widget_tweaks',
'firecares.tasks',
'registration'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
# Celery settings.
BROKER_URL = os.getenv('BROKER_URL', 'amqp://guest:[email protected]//')
CELERY_RESULT_BACKEND = os.getenv('CELERY_RESULT_BACKEND', 'amqp')
AWS_STORAGE_BUCKET_NAME = os.getenv('AWS_STORAGE_BUCKET_NAME', None)
AWS_ACCESS_KEY_ID = os.getenv('AWS_ACCESS_KEY_ID', None)
AWS_SECRET_ACCESS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', None)
MAPBOX_ACCESS_TOKEN = os.getenv('MAPBOX_ACCESS_TOKEN', None)
GOOGLE_ANALYTICS_TRACKING_ID = os.getenv('GOOGLE_ANALYTICS_TRACKING_ID', None)
LOGIN_REDIRECT_URL = '/'
LOGIN_URL = '/login'
CELERY_DEFAULT_QUEUE = "default"
CELERY_DEFAULT_EXCHANGE = "default"
CELERY_DEFAULT_EXCHANGE_TYPE = "direct"
CELERY_DEFAULT_ROUTING_KEY = "default"
CELERY_CREATE_MISSING_QUEUES = True
CELERY_IMPORTS = (
'firecares.tasks.cache',
'firecares.tasks.update',
)
CELERY_QUEUES = [
Queue('default', routing_key='default'),
Queue('cache', routing_key='cache'),
Queue('update', routing_key='update'),
Queue('email', routing_key='email'),
]
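# Illustrative note (assumption, not part of this settings module): tasks can be
# directed to one of the queues above at call time, e.g.
#   some_task.apply_async(args=[...], queue='update', routing_key='update')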
ACCOUNT_ACTIVATION_DAYS = 7
REGISTRATION_OPEN = False
EMAIL_HOST = os.getenv('EMAIL_HOST', 'localhost')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD', '')
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER', '')
EMAIL_PORT = os.getenv('EMAIL_PORT', 25)
EMAIL_SUBJECT_PREFIX = '[FireCARES] '
SERVER_EMAIL = os.getenv('SERVER_EMAIL', '')
DEFAULT_FROM_EMAIL = os.getenv('DEFAULT_FROM_EMAIL', '') | [
"[email protected]"
] | |
4ceda0b049891a9c2963a7c0c48c3f511140ac69 | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/HundredRun/PG_0132+151/sdB_PG_0132+151_lc.py | 8918441d2824176148a8e7a956f7ebd08c4bc153 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[356.757495,15.400942], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_PG_0132+151 /sdB_PG_0132+151_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
a3b152abeff9b59f25a919958f7e36901eaaa4ad | 385a63d3c9e6f5815979165001f78ec3d7b90cd2 | /DrivingTDM_SetupMatlabOOP/headerAndFunctionsMotor/ximc/python-profiles/STANDA/10MCWA168-20.py | f6af4a4b73c9f33f4b74a776ed1ee7067c55ade2 | [
"BSD-2-Clause"
] | permissive | Rasedujjaman/matlabOOP | 5abb6ec94998fda5e9214ed94cf67a42bf243d4f | e1f025ab9b00a3646719df23852079736d2b5701 | refs/heads/main | 2023-07-23T21:40:53.905045 | 2021-08-31T16:12:39 | 2021-08-31T16:12:39 | 378,249,559 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 22,523 | py | def set_profile_10MCWA168_20(lib, id):
worst_result = Result.Ok
result = Result.Ok
feedback_settings = feedback_settings_t()
feedback_settings.IPS = 4000
class FeedbackType_:
FEEDBACK_ENCODER_MEDIATED = 6
FEEDBACK_NONE = 5
FEEDBACK_EMF = 4
FEEDBACK_ENCODER = 1
feedback_settings.FeedbackType = FeedbackType_.FEEDBACK_NONE
class FeedbackFlags_:
FEEDBACK_ENC_TYPE_BITS = 192
FEEDBACK_ENC_TYPE_DIFFERENTIAL = 128
FEEDBACK_ENC_TYPE_SINGLE_ENDED = 64
FEEDBACK_ENC_REVERSE = 1
FEEDBACK_ENC_TYPE_AUTO = 0
feedback_settings.FeedbackFlags = FeedbackFlags_.FEEDBACK_ENC_TYPE_SINGLE_ENDED | FeedbackFlags_.FEEDBACK_ENC_TYPE_AUTO
feedback_settings.CountsPerTurn = 4000
result = lib.set_feedback_settings(id, byref(feedback_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
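    # Pattern repeated for every settings block below: worst_result is overwritten
    # only while it is still Ok or ValueError, so the first more serious error code
    # returned by a set_*_settings call is preserved and reported to the caller.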
home_settings = home_settings_t()
home_settings.FastHome = 55
home_settings.uFastHome = 0
home_settings.SlowHome = 55
home_settings.uSlowHome = 0
home_settings.HomeDelta = 0
home_settings.uHomeDelta = 0
class HomeFlags_:
HOME_USE_FAST = 256
HOME_STOP_SECOND_BITS = 192
HOME_STOP_SECOND_LIM = 192
HOME_STOP_SECOND_SYN = 128
HOME_STOP_SECOND_REV = 64
HOME_STOP_FIRST_BITS = 48
HOME_STOP_FIRST_LIM = 48
HOME_STOP_FIRST_SYN = 32
HOME_STOP_FIRST_REV = 16
HOME_HALF_MV = 8
HOME_MV_SEC_EN = 4
HOME_DIR_SECOND = 2
HOME_DIR_FIRST = 1
home_settings.HomeFlags = HomeFlags_.HOME_USE_FAST | HomeFlags_.HOME_STOP_SECOND_BITS | HomeFlags_.HOME_STOP_FIRST_BITS | HomeFlags_.HOME_HALF_MV | HomeFlags_.HOME_DIR_FIRST
result = lib.set_home_settings(id, byref(home_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
move_settings = move_settings_t()
move_settings.Speed = 27
move_settings.uSpeed = 0
move_settings.Accel = 200
move_settings.Decel = 200
move_settings.AntiplaySpeed = 27
move_settings.uAntiplaySpeed = 0
class MoveFlags_:
RPM_DIV_1000 = 1
result = lib.set_move_settings(id, byref(move_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_settings = engine_settings_t()
engine_settings.NomVoltage = 1
engine_settings.NomCurrent = 1200
engine_settings.NomSpeed = 55
engine_settings.uNomSpeed = 0
class EngineFlags_:
ENGINE_LIMIT_RPM = 128
ENGINE_LIMIT_CURR = 64
ENGINE_LIMIT_VOLT = 32
ENGINE_ACCEL_ON = 16
ENGINE_ANTIPLAY = 8
ENGINE_MAX_SPEED = 4
ENGINE_CURRENT_AS_RMS = 2
ENGINE_REVERSE = 1
engine_settings.EngineFlags = EngineFlags_.ENGINE_LIMIT_RPM | EngineFlags_.ENGINE_ACCEL_ON
engine_settings.Antiplay = 9
class MicrostepMode_:
MICROSTEP_MODE_FRAC_256 = 9
MICROSTEP_MODE_FRAC_128 = 8
MICROSTEP_MODE_FRAC_64 = 7
MICROSTEP_MODE_FRAC_32 = 6
MICROSTEP_MODE_FRAC_16 = 5
MICROSTEP_MODE_FRAC_8 = 4
MICROSTEP_MODE_FRAC_4 = 3
MICROSTEP_MODE_FRAC_2 = 2
MICROSTEP_MODE_FULL = 1
engine_settings.MicrostepMode = MicrostepMode_.MICROSTEP_MODE_FRAC_256
engine_settings.StepsPerRev = 200
result = lib.set_engine_settings(id, byref(engine_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
entype_settings = entype_settings_t()
class EngineType_:
ENGINE_TYPE_BRUSHLESS = 5
ENGINE_TYPE_TEST = 4
ENGINE_TYPE_STEP = 3
ENGINE_TYPE_2DC = 2
ENGINE_TYPE_DC = 1
ENGINE_TYPE_NONE = 0
entype_settings.EngineType = EngineType_.ENGINE_TYPE_STEP | EngineType_.ENGINE_TYPE_NONE
class DriverType_:
DRIVER_TYPE_EXTERNAL = 3
DRIVER_TYPE_INTEGRATE = 2
DRIVER_TYPE_DISCRETE_FET = 1
entype_settings.DriverType = DriverType_.DRIVER_TYPE_INTEGRATE
result = lib.set_entype_settings(id, byref(entype_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
power_settings = power_settings_t()
power_settings.HoldCurrent = 50
power_settings.CurrReductDelay = 1000
power_settings.PowerOffDelay = 60
power_settings.CurrentSetTime = 300
class PowerFlags_:
POWER_SMOOTH_CURRENT = 4
POWER_OFF_ENABLED = 2
POWER_REDUCT_ENABLED = 1
power_settings.PowerFlags = PowerFlags_.POWER_SMOOTH_CURRENT | PowerFlags_.POWER_OFF_ENABLED | PowerFlags_.POWER_REDUCT_ENABLED
result = lib.set_power_settings(id, byref(power_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
secure_settings = secure_settings_t()
secure_settings.LowUpwrOff = 800
secure_settings.CriticalIpwr = 4000
secure_settings.CriticalUpwr = 5500
secure_settings.CriticalT = 800
secure_settings.CriticalIusb = 450
secure_settings.CriticalUusb = 520
secure_settings.MinimumUusb = 420
class Flags_:
ALARM_ENGINE_RESPONSE = 128
ALARM_WINDING_MISMATCH = 64
USB_BREAK_RECONNECT = 32
ALARM_FLAGS_STICKING = 16
ALARM_ON_BORDERS_SWAP_MISSET = 8
H_BRIDGE_ALERT = 4
LOW_UPWR_PROTECTION = 2
ALARM_ON_DRIVER_OVERHEATING = 1
secure_settings.Flags = Flags_.ALARM_ENGINE_RESPONSE | Flags_.ALARM_FLAGS_STICKING | Flags_.ALARM_ON_BORDERS_SWAP_MISSET | Flags_.H_BRIDGE_ALERT | Flags_.ALARM_ON_DRIVER_OVERHEATING
result = lib.set_secure_settings(id, byref(secure_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
edges_settings = edges_settings_t()
class BorderFlags_:
BORDERS_SWAP_MISSET_DETECTION = 8
BORDER_STOP_RIGHT = 4
BORDER_STOP_LEFT = 2
BORDER_IS_ENCODER = 1
class EnderFlags_:
ENDER_SW2_ACTIVE_LOW = 4
ENDER_SW1_ACTIVE_LOW = 2
ENDER_SWAP = 1
edges_settings.EnderFlags = EnderFlags_.ENDER_SW2_ACTIVE_LOW
edges_settings.LeftBorder = 5
edges_settings.uLeftBorder = 0
edges_settings.RightBorder = 195
edges_settings.uRightBorder = 0
result = lib.set_edges_settings(id, byref(edges_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
pid_settings = pid_settings_t()
pid_settings.KpU = 0
pid_settings.KiU = 0
pid_settings.KdU = 0
pid_settings.Kpf = 0.003599999938160181
pid_settings.Kif = 0.03799999877810478
pid_settings.Kdf = 2.8000000384054147e-05
result = lib.set_pid_settings(id, byref(pid_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_in_settings = sync_in_settings_t()
class SyncInFlags_:
SYNCIN_GOTOPOSITION = 4
SYNCIN_INVERT = 2
SYNCIN_ENABLED = 1
sync_in_settings.ClutterTime = 4
sync_in_settings.Position = 0
sync_in_settings.uPosition = 0
sync_in_settings.Speed = 0
sync_in_settings.uSpeed = 0
result = lib.set_sync_in_settings(id, byref(sync_in_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
sync_out_settings = sync_out_settings_t()
class SyncOutFlags_:
SYNCOUT_ONPERIOD = 64
SYNCOUT_ONSTOP = 32
SYNCOUT_ONSTART = 16
SYNCOUT_IN_STEPS = 8
SYNCOUT_INVERT = 4
SYNCOUT_STATE = 2
SYNCOUT_ENABLED = 1
sync_out_settings.SyncOutFlags = SyncOutFlags_.SYNCOUT_ONSTOP | SyncOutFlags_.SYNCOUT_ONSTART
sync_out_settings.SyncOutPulseSteps = 100
sync_out_settings.SyncOutPeriod = 2000
sync_out_settings.Accuracy = 0
sync_out_settings.uAccuracy = 0
result = lib.set_sync_out_settings(id, byref(sync_out_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extio_settings = extio_settings_t()
class EXTIOSetupFlags_:
EXTIO_SETUP_INVERT = 2
EXTIO_SETUP_OUTPUT = 1
extio_settings.EXTIOSetupFlags = EXTIOSetupFlags_.EXTIO_SETUP_OUTPUT
class EXTIOModeFlags_:
EXTIO_SETUP_MODE_OUT_BITS = 240
EXTIO_SETUP_MODE_OUT_MOTOR_ON = 64
EXTIO_SETUP_MODE_OUT_ALARM = 48
EXTIO_SETUP_MODE_OUT_MOVING = 32
EXTIO_SETUP_MODE_OUT_ON = 16
EXTIO_SETUP_MODE_IN_BITS = 15
EXTIO_SETUP_MODE_IN_ALARM = 5
EXTIO_SETUP_MODE_IN_HOME = 4
EXTIO_SETUP_MODE_IN_MOVR = 3
EXTIO_SETUP_MODE_IN_PWOF = 2
EXTIO_SETUP_MODE_IN_STOP = 1
EXTIO_SETUP_MODE_IN_NOP = 0
EXTIO_SETUP_MODE_OUT_OFF = 0
extio_settings.EXTIOModeFlags = EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_STOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_IN_NOP | EXTIOModeFlags_.EXTIO_SETUP_MODE_OUT_OFF
result = lib.set_extio_settings(id, byref(extio_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
brake_settings = brake_settings_t()
brake_settings.t1 = 300
brake_settings.t2 = 500
brake_settings.t3 = 300
brake_settings.t4 = 400
class BrakeFlags_:
BRAKE_ENG_PWROFF = 2
BRAKE_ENABLED = 1
brake_settings.BrakeFlags = BrakeFlags_.BRAKE_ENG_PWROFF
result = lib.set_brake_settings(id, byref(brake_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
control_settings = control_settings_t()
control_settings.MaxSpeed[0] = 27
control_settings.MaxSpeed[1] = 0
control_settings.MaxSpeed[2] = 0
control_settings.MaxSpeed[3] = 0
control_settings.MaxSpeed[4] = 0
control_settings.MaxSpeed[5] = 0
control_settings.MaxSpeed[6] = 0
control_settings.MaxSpeed[7] = 0
control_settings.MaxSpeed[8] = 0
control_settings.MaxSpeed[9] = 0
control_settings.uMaxSpeed[0] = 0
control_settings.uMaxSpeed[1] = 0
control_settings.uMaxSpeed[2] = 0
control_settings.uMaxSpeed[3] = 0
control_settings.uMaxSpeed[4] = 0
control_settings.uMaxSpeed[5] = 0
control_settings.uMaxSpeed[6] = 0
control_settings.uMaxSpeed[7] = 0
control_settings.uMaxSpeed[8] = 0
control_settings.uMaxSpeed[9] = 0
control_settings.Timeout[0] = 1000
control_settings.Timeout[1] = 1000
control_settings.Timeout[2] = 1000
control_settings.Timeout[3] = 1000
control_settings.Timeout[4] = 1000
control_settings.Timeout[5] = 1000
control_settings.Timeout[6] = 1000
control_settings.Timeout[7] = 1000
control_settings.Timeout[8] = 1000
control_settings.MaxClickTime = 300
class Flags_:
CONTROL_BTN_RIGHT_PUSHED_OPEN = 8
CONTROL_BTN_LEFT_PUSHED_OPEN = 4
CONTROL_MODE_BITS = 3
CONTROL_MODE_LR = 2
CONTROL_MODE_JOY = 1
CONTROL_MODE_OFF = 0
control_settings.Flags = Flags_.CONTROL_MODE_LR | Flags_.CONTROL_MODE_OFF
control_settings.DeltaPosition = 1
control_settings.uDeltaPosition = 0
result = lib.set_control_settings(id, byref(control_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
joystick_settings = joystick_settings_t()
joystick_settings.JoyLowEnd = 0
joystick_settings.JoyCenter = 5000
joystick_settings.JoyHighEnd = 10000
joystick_settings.ExpFactor = 100
joystick_settings.DeadZone = 50
class JoyFlags_:
JOY_REVERSE = 1
result = lib.set_joystick_settings(id, byref(joystick_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
ctp_settings = ctp_settings_t()
ctp_settings.CTPMinError = 3
class CTPFlags_:
CTP_ERROR_CORRECTION = 16
REV_SENS_INV = 8
CTP_ALARM_ON_ERROR = 4
CTP_BASE = 2
CTP_ENABLED = 1
ctp_settings.CTPFlags = CTPFlags_.CTP_ERROR_CORRECTION
result = lib.set_ctp_settings(id, byref(ctp_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
uart_settings = uart_settings_t()
uart_settings.Speed = 115200
class UARTSetupFlags_:
UART_STOP_BIT = 8
UART_PARITY_BIT_USE = 4
UART_PARITY_BITS = 3
UART_PARITY_BIT_MARK = 3
UART_PARITY_BIT_SPACE = 2
UART_PARITY_BIT_ODD = 1
UART_PARITY_BIT_EVEN = 0
uart_settings.UARTSetupFlags = UARTSetupFlags_.UART_PARITY_BIT_EVEN
result = lib.set_uart_settings(id, byref(uart_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
controller_name = controller_name_t()
controller_name.ControllerName = bytes([0, 113, 252, 118, 36, 0, 72, 0, 3, 0, 0, 0, 104, 101, 103, 0])
class CtrlFlags_:
EEPROM_PRECEDENCE = 1
result = lib.set_controller_name(id, byref(controller_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
emf_settings = emf_settings_t()
emf_settings.L = 0
emf_settings.R = 0
emf_settings.Km = 0
class BackEMFFlags_:
BACK_EMF_KM_AUTO = 4
BACK_EMF_RESISTANCE_AUTO = 2
BACK_EMF_INDUCTANCE_AUTO = 1
result = lib.set_emf_settings(id, byref(emf_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
engine_advansed_setup = engine_advansed_setup_t()
engine_advansed_setup.stepcloseloop_Kw = 50
engine_advansed_setup.stepcloseloop_Kp_low = 1000
engine_advansed_setup.stepcloseloop_Kp_high = 33
result = lib.set_engine_advansed_setup(id, byref(engine_advansed_setup))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
extended_settings = extended_settings_t()
extended_settings.Param1 = 0
result = lib.set_extended_settings(id, byref(extended_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_name = stage_name_t()
stage_name.PositionerName = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_name(id, byref(stage_name))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_information = stage_information_t()
stage_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
stage_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_stage_information(id, byref(stage_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
stage_settings = stage_settings_t()
stage_settings.LeadScrewPitch = 0
stage_settings.Units = bytes([0, 0, 0, 0, 0, 0, 0, 0])
stage_settings.MaxSpeed = 0
stage_settings.TravelRange = 0
stage_settings.SupplyVoltageMin = 0
stage_settings.SupplyVoltageMax = 0
stage_settings.MaxCurrentConsumption = 0
stage_settings.HorizontalLoadCapacity = 0
stage_settings.VerticalLoadCapacity = 0
result = lib.set_stage_settings(id, byref(stage_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_information = motor_information_t()
motor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
motor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_motor_information(id, byref(motor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
motor_settings = motor_settings_t()
class MotorType_:
MOTOR_TYPE_BLDC = 3
MOTOR_TYPE_DC = 2
MOTOR_TYPE_STEP = 1
MOTOR_TYPE_UNKNOWN = 0
motor_settings.MotorType = MotorType_.MOTOR_TYPE_UNKNOWN
motor_settings.ReservedField = 0
motor_settings.Poles = 0
motor_settings.Phases = 0
motor_settings.NominalVoltage = 0
motor_settings.NominalCurrent = 0
motor_settings.NominalSpeed = 0
motor_settings.NominalTorque = 0
motor_settings.NominalPower = 0
motor_settings.WindingResistance = 0
motor_settings.WindingInductance = 0
motor_settings.RotorInertia = 0
motor_settings.StallTorque = 0
motor_settings.DetentTorque = 0
motor_settings.TorqueConstant = 0
motor_settings.SpeedConstant = 0
motor_settings.SpeedTorqueGradient = 0
motor_settings.MechanicalTimeConstant = 0
motor_settings.MaxSpeed = 0
motor_settings.MaxCurrent = 0
motor_settings.MaxCurrentTime = 0
motor_settings.NoLoadCurrent = 0
motor_settings.NoLoadSpeed = 0
result = lib.set_motor_settings(id, byref(motor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_information = encoder_information_t()
encoder_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
encoder_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_encoder_information(id, byref(encoder_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
encoder_settings = encoder_settings_t()
encoder_settings.MaxOperatingFrequency = 0
encoder_settings.SupplyVoltageMin = 0
encoder_settings.SupplyVoltageMax = 0
encoder_settings.MaxCurrentConsumption = 0
encoder_settings.PPR = 0
class EncoderSettings_:
ENCSET_REVOLUTIONSENSOR_ACTIVE_HIGH = 256
ENCSET_REVOLUTIONSENSOR_PRESENT = 64
ENCSET_INDEXCHANNEL_PRESENT = 16
ENCSET_PUSHPULL_OUTPUT = 4
ENCSET_DIFFERENTIAL_OUTPUT = 1
result = lib.set_encoder_settings(id, byref(encoder_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_information = hallsensor_information_t()
hallsensor_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
hallsensor_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_hallsensor_information(id, byref(hallsensor_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
hallsensor_settings = hallsensor_settings_t()
hallsensor_settings.MaxOperatingFrequency = 0
hallsensor_settings.SupplyVoltageMin = 0
hallsensor_settings.SupplyVoltageMax = 0
hallsensor_settings.MaxCurrentConsumption = 0
hallsensor_settings.PPR = 0
result = lib.set_hallsensor_settings(id, byref(hallsensor_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_information = gear_information_t()
gear_information.Manufacturer = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
gear_information.PartNumber = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
result = lib.set_gear_information(id, byref(gear_information))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
gear_settings = gear_settings_t()
gear_settings.ReductionIn = 1
gear_settings.ReductionOut = 1
gear_settings.RatedInputTorque = 0
gear_settings.RatedInputSpeed = 0
gear_settings.MaxOutputBacklash = 0
gear_settings.InputInertia = 0
gear_settings.Efficiency = 0
result = lib.set_gear_settings(id, byref(gear_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
accessories_settings = accessories_settings_t()
accessories_settings.MagneticBrakeInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.MBRatedVoltage = 0
accessories_settings.MBRatedCurrent = 0
accessories_settings.MBTorque = 0
class MBSettings_:
MB_POWERED_HOLD = 2
MB_AVAILABLE = 1
accessories_settings.TemperatureSensorInfo = bytes([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
accessories_settings.TSMin = 0
accessories_settings.TSMax = 0
accessories_settings.TSGrad = 0
class TSSettings_:
TS_AVAILABLE = 8
TS_TYPE_BITS = 7
TS_TYPE_SEMICONDUCTOR = 2
TS_TYPE_THERMOCOUPLE = 1
TS_TYPE_UNKNOWN = 0
accessories_settings.TSSettings = TSSettings_.TS_TYPE_THERMOCOUPLE | TSSettings_.TS_TYPE_UNKNOWN
class LimitSwitchesSettings_:
LS_SHORTED = 16
LS_SW2_ACTIVE_LOW = 8
LS_SW1_ACTIVE_LOW = 4
LS_ON_SW2_AVAILABLE = 2
LS_ON_SW1_AVAILABLE = 1
result = lib.set_accessories_settings(id, byref(accessories_settings))
if result != Result.Ok:
if worst_result == Result.Ok or worst_result == Result.ValueError:
worst_result = result
return worst_result
| [
"[email protected]"
] | |
a8824158345cddc59be0477b3353fbdb3dbef6da | fc2d2163e790741de0c0e1aa337948cfeb5b6ba9 | /tests/benchmarks/micro/NestedFunctionClosure.py | 925ad5c31e3bbdfcba39ca286facd4a95fe0b59e | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | nmoehrle/Nuitka | bcd20531f150ada82c8414620dca6c5424be64d1 | 317d1e4e49ef8b3bdfe2f80f2464040d644588b2 | refs/heads/master | 2023-06-22T09:56:23.604822 | 2017-11-29T14:10:01 | 2017-11-29T14:10:01 | 122,110,166 | 0 | 0 | Apache-2.0 | 2018-02-19T19:29:05 | 2018-02-19T19:29:05 | null | UTF-8 | Python | false | false | 1,066 | py | # Copyright 2017, Kay Hayen, mailto:[email protected]
#
# Python test originally created or extracted from other peoples work. The
# parts from me are licensed as below. It is at least Free Software where
# it's copied from other people. In these cases, that will normally be
# indicated.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
x = 1
def f():
c = x+1
def g():
return c
return g()
def caller():
for i in range(10000):
f()
if __name__ == "__main__":
caller()
| [
"[email protected]"
] | |
c4070e57949127be2bf575ae160cb07672a86fd4 | 064404a6e65dc4bb78624e47fb8010615e20fbe8 | /opsgenie_sdk/api/alert/add_details_to_alert_payload.py | bbc3a87ef3d5178e5be8868d78923a57e5e51352 | [
"Apache-2.0"
] | permissive | lyongjie20/opsgenie-python-sdk | 97de823d958995f44b1934c1aaf1b5740a8efd1e | 0d20d2314522fc0fd8ca5f0faa16f7c96387e123 | refs/heads/master | 2023-07-01T14:31:27.379893 | 2021-08-02T13:30:07 | 2021-08-02T13:30:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,705 | py | # coding: utf-8
"""
Python SDK for Opsgenie REST API
Python SDK for Opsgenie REST API # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class AddDetailsToAlertPayload(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'user': 'str',
'note': 'str',
'source': 'str',
'details': 'dict(str, str)'
}
attribute_map = {
'user': 'user',
'note': 'note',
'source': 'source',
'details': 'details'
}
def __init__(self, user=None, note=None, source=None, details=None): # noqa: E501
"""AddDetailsToAlertPayload - a model defined in OpenAPI""" # noqa: E501
self._user = None
self._note = None
self._source = None
self._details = None
self.discriminator = None
if user is not None:
self.user = user
if note is not None:
self.note = note
if source is not None:
self.source = source
self.details = details
@property
def user(self):
"""Gets the user of this AddDetailsToAlertPayload. # noqa: E501
Display name of the request owner # noqa: E501
:return: The user of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this AddDetailsToAlertPayload.
Display name of the request owner # noqa: E501
:param user: The user of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._user = user
@property
def note(self):
"""Gets the note of this AddDetailsToAlertPayload. # noqa: E501
Additional note that will be added while creating the alert # noqa: E501
:return: The note of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._note
@note.setter
def note(self, note):
"""Sets the note of this AddDetailsToAlertPayload.
Additional note that will be added while creating the alert # noqa: E501
:param note: The note of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._note = note
@property
def source(self):
"""Gets the source of this AddDetailsToAlertPayload. # noqa: E501
Source field of the alert. Default value is IP address of the incoming request # noqa: E501
:return: The source of this AddDetailsToAlertPayload. # noqa: E501
:rtype: str
"""
return self._source
@source.setter
def source(self, source):
"""Sets the source of this AddDetailsToAlertPayload.
Source field of the alert. Default value is IP address of the incoming request # noqa: E501
:param source: The source of this AddDetailsToAlertPayload. # noqa: E501
:type: str
"""
self._source = source
@property
def details(self):
"""Gets the details of this AddDetailsToAlertPayload. # noqa: E501
Key-value pairs to add as custom property into alert. # noqa: E501
:return: The details of this AddDetailsToAlertPayload. # noqa: E501
:rtype: dict(str, str)
"""
return self._details
@details.setter
def details(self, details):
"""Sets the details of this AddDetailsToAlertPayload.
Key-value pairs to add as custom property into alert. # noqa: E501
:param details: The details of this AddDetailsToAlertPayload. # noqa: E501
:type: dict(str, str)
"""
if details is None:
raise ValueError("Invalid value for `details`, must not be `None`") # noqa: E501
self._details = details
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AddDetailsToAlertPayload):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
44d6d277ce2adec9b2f94b011620101c7ccec4f4 | 1f31dc44aca58992aa478635cfe036566a7eebe2 | /py/kubeflow/kfctl/testing/ci/update_jupyter_web_app.py | ff987dbaf3747e37071a3b1067faf73ea5262fbd | [
"Apache-2.0"
] | permissive | adrian555/kfctl | ee3c8517da358884aba35e20bdf564d903aa1d66 | a6bd7d4c15571492a08a551b7566cb111b39bd0c | refs/heads/master | 2020-12-15T18:38:30.190800 | 2020-10-26T20:50:24 | 2020-10-26T20:50:24 | 269,209,194 | 0 | 5 | Apache-2.0 | 2020-06-03T22:38:51 | 2020-06-03T22:38:50 | null | UTF-8 | Python | false | false | 10,444 | py | """Script to build and update the Jupyter WebApp image.
Requires python3
hub CLI depends on an OAuth token with repo permissions:
https://hub.github.com/hub.1.html
* It will look for environment variable GITHUB_TOKEN
"""
import logging
import os
import tempfile
import yaml
import fire
import git
import httplib2
from kubeflow.kfctl.testing.util import application_util
from kubeflow.testing import util # pylint: disable=no-name-in-module
from containerregistry.client import docker_creds
from containerregistry.client import docker_name
from containerregistry.client.v2_2 import docker_http
from containerregistry.client.v2_2 import docker_image as v2_2_image
from containerregistry.transport import transport_pool
# The image name as defined in the kustomization file
JUPYTER_WEB_APP_IMAGE_NAME = "gcr.io/kubeflow-images-public/jupyter-web-app"
class WebAppUpdater(object): # pylint: disable=useless-object-inheritance
def __init__(self):
self._last_commit = None
self.manifests_repo_dir = None
def build_image(self, build_project, registry_project):
"""Build the image.
Args:
build_project: GCP project used to build the image.
registry_project: GCP project used to host the image.
"""
env = dict()
env.update(os.environ)
env["PROJECT"] = build_project
env["REGISTRY_PROJECT"] = registry_project
env["GIT_TAG"] = self._last_commit
with tempfile.NamedTemporaryFile() as hf:
name = hf.name
env["OUTPUT"] = name
web_dir = self._component_dir()
util.run(["make", "build-gcb"], env=env, cwd=web_dir)
# TODO(jlewi): We want to get the actual image produced by GCB. Right
# now this is a bit brittle because we have multiple layers of substitution
# e.g. in the Makefile and then the GCB YAML.
# It might be better to parse the stdout of make-build-gcb to get the
# GCB job name and then fetch the GCB info specifying the images.
with open(name) as hf:
data = yaml.load(hf)
return data["image"]
@property
def last_commit(self):
"""Get the last commit of a change to the source for the jupyter-web-app."""
if not self._last_commit:
# Get the hash of the last commit to modify the source for the Jupyter web
# app image
self._last_commit = util.run(["git", "log", "-n", "1",
"--pretty=format:\"%h\"",
"components/jupyter-web-app"],
cwd=self._root_dir()).strip("\"")
return self._last_commit
def _find_remote_repo(self, repo, remote_url): # pylint: disable=no-self-use
"""Find the remote repo if it has already been added.
Args:
repo: The git python repo object.
remote_url: The URL of the remote repo e.g.
[email protected]:jlewi/kubeflow.git
Returns:
remote: git-python object representing the remote repo or none if it
isn't present.
"""
for r in repo.remotes:
for u in r.urls:
if remote_url == u:
return r
return None
def all(self, build_project, registry_project, remote_fork, # pylint: disable=too-many-statements,too-many-branches
kustomize_file, add_github_host=False):
"""Build the latest image and update the prototype.
Args:
build_project: GCP project used to build the image.
registry_project: GCP project used to host the image.
      remote_fork: URL of the remote fork used to create the PR,
        e.g. [email protected]:jlewi/kubeflow.git. Currently only SSH is
        supported.
kustomize_file: Path to the kustomize file
add_github_host: If true will add the github ssh host to known ssh hosts.
"""
# TODO(jlewi): How can we automatically determine the root of the git
# repo containing the kustomize_file?
self.manifests_repo_dir = util.run(["git", "rev-parse", "--show-toplevel"],
cwd=os.path.dirname(kustomize_file))
repo = git.Repo(self.manifests_repo_dir)
util.maybe_activate_service_account()
last_commit = self.last_commit
# Ensure github.com is in the known hosts
if add_github_host:
output = util.run(["ssh-keyscan", "github.com"])
with open(os.path.join(os.getenv("HOME"), ".ssh", "known_hosts"),
mode='a') as hf:
hf.write(output)
if not remote_fork.startswith("[email protected]"):
raise ValueError("Remote fork currently only supports ssh")
remote_repo = self._find_remote_repo(repo, remote_fork)
if not remote_repo:
fork_name = remote_fork.split(":", 1)[-1].split("/", 1)[0]
logging.info("Adding remote %s=%s", fork_name, remote_fork)
remote_repo = repo.create_remote(fork_name, remote_fork)
logging.info("Last change to components-jupyter-web-app was %s", last_commit)
base = "gcr.io/{0}/jupyter-web-app".format(registry_project)
# Check if there is already an image tagged with this commit.
image = base + ":" + self.last_commit
transport = transport_pool.Http(httplib2.Http)
src = docker_name.from_string(image)
creds = docker_creds.DefaultKeychain.Resolve(src)
image_exists = False
try:
with v2_2_image.FromRegistry(src, creds, transport) as src_image:
logging.info("Image %s exists; digest: %s", image,
src_image.digest())
image_exists = True
except docker_http.V2DiagnosticException as e:
if e.status == 404:
logging.info("%s doesn't exist", image)
else:
raise
if not image_exists:
logging.info("Building the image")
image = self.build_image(build_project, registry_project)
logging.info("Created image: %s", image)
else:
logging.info("Image %s already exists", image)
# TODO(jlewi): What if the file was already modified so we didn't
# modify it in this run but we still need to commit it?
image_updated = application_util.set_kustomize_image(
kustomize_file, JUPYTER_WEB_APP_IMAGE_NAME, image)
if not image_updated:
logging.info("kustomization not updated so not creating a PR.")
return
application_util.regenerate_manifest_tests(self.manifests_repo_dir)
branch_name = "update_jupyter_{0}".format(last_commit)
if repo.active_branch.name != branch_name:
logging.info("Creating branch %s", branch_name)
branch_names = [b.name for b in repo.branches]
if branch_name in branch_names:
logging.info("Branch %s exists", branch_name)
util.run(["git", "checkout", branch_name], cwd=self.manifests_repo_dir)
else:
util.run(["git", "checkout", "-b", branch_name],
cwd=self.manifests_repo_dir)
if self._check_if_pr_exists(commit=last_commit):
# Since a PR already exists updating to the specified commit
# don't create a new one.
# We don't want to just push -f because if the PR already exists
# git push -f will retrigger the tests.
# To force a recreate of the PR someone could close the existing
# PR and a new PR will be created on the next cron run.
return
logging.info("Add file %s to repo", kustomize_file)
repo.index.add([kustomize_file])
repo.index.add([os.path.join(self.manifests_repo_dir, "tests/*")])
repo.index.commit("Update the jupyter web app image to {0}".format(image))
util.run(["git", "push", "-f", remote_repo.name,
"{0}:{0}".format(branch_name)],
cwd=self.manifests_repo_dir)
self.create_pull_request(commit=last_commit)
def _pr_title(self, commit):
pr_title = "[auto PR] Update the jupyter-web-app image to {0}".format(
commit)
return pr_title
def _check_if_pr_exists(self, commit=None):
"""Check if a PR is already open.
Returns:
exists: True if a PR updating the image to the specified commit already
exists and false otherwise.
"""
# TODO(jlewi): Modeled on
# https://github.com/kubeflow/examples/blob/master/code_search/docker/ks/update_index.sh
# TODO(jlewi): We should use the GitHub API and check if there is an
# existing open pull request. Or potentially just use the hub CLI.
if not commit:
commit = self.last_commit
logging.info("No commit specified defaulting to %s", commit)
pr_title = self._pr_title(commit)
# See hub conventions:
# https://hub.github.com/hub.1.html
# The GitHub repository is determined automatically based on the name
# of remote repositories
output = util.run(["hub", "pr", "list", "--format=%U;%t\n"],
cwd=self.manifests_repo_dir)
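    # With --format=%U;%t each output line is "<pr-url>;<pr-title>", e.g.
    # (illustrative): https://github.com/kubeflow/manifests/pull/123;[auto PR] Update the jupyter-web-app image to <commit>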
lines = output.splitlines()
prs = {}
for l in lines:
n, t = l.split(";", 1)
prs[t] = n
if pr_title in prs:
logging.info("PR %s already exists to update the Jupyter web app image "
"to %s", prs[pr_title], commit)
return True
return False
def create_pull_request(self, base="kubeflow:master", commit=None):
"""Create a pull request.
Args:
base: The base to use. Defaults to "kubeflow:master". This should be
in the form <GitHub OWNER>:<branch>
"""
pr_title = self._pr_title(commit)
with tempfile.NamedTemporaryFile(delete=False, mode="w") as hf:
hf.write(pr_title)
hf.write("\n")
hf.write("\n")
hf.write(
"Image built from commit https://github.com/kubeflow/kubeflow/"
"commit/{0}".format(self._last_commit))
message_file = hf.name
# TODO(jlewi): -f creates the pull requests even if there are local changes
# this was useful during development but we may want to drop it.
util.run(["hub", "pull-request", "-f", "--base=" + base, "-F",
message_file],
cwd=self.manifests_repo_dir)
def _root_dir(self):
this_dir = os.path.dirname(__file__)
return os.path.abspath(os.path.join(this_dir, "..", "..", "..", ".."))
def _component_dir(self):
return os.path.join(self._root_dir(), "components", "jupyter-web-app")
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO,
format=('%(levelname)s|%(asctime)s'
'|%(pathname)s|%(lineno)d| %(message)s'),
datefmt='%Y-%m-%dT%H:%M:%S',
)
logging.getLogger().setLevel(logging.INFO)
fire.Fire(WebAppUpdater)
| [
"[email protected]"
] | |
50a0e0a631826408e3f3cd6fd38ce599131e4588 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2506/60832/280621.py | f1113858470ab2074adaab3a1b0c3b72695ba34b | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | import numpy as np
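# Quadratic dynamic program that appears to compute the length of the longest
# strictly increasing subsequence of the comma-separated input: opt[i] is the
# best length over the first i+1 elements and Max[i] is the smallest possible
# tail value achieving that length.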
ar = list(map(int, input().split(',')))
length = len(ar)
if length == 0:
print(0)
exit()
Max = np.zeros(length)
opt = np.zeros(length)
opt[0] = 1
Max[0] = ar[0]
for i in range(1, length):
a = opt[i - 1]
temp = ar[i]
j = i - 1
has = False
for j in range(i - 1, -1, -1):
if Max[j] < temp:
has = True
break
if has:
b = opt[j] + 1
else:
b = 1
if b > a:
Max[i] = ar[i]
opt[i] = b
elif b == a:
Max[i] = min(Max[i - 1], ar[i])
opt[i] = b
else:
Max[i] = Max[i - 1]
opt[i] = a
print(int(opt[length - 1]))
| [
"[email protected]"
] | |
d5edadbe66cf157c9e28ddd27acc016d3102d6ac | abef98cfa3fb2c4626eb8c0a77c1080992d9b11b | /python/bindiff/bindiff.py | 00885d7c9351425c339702dff4eee4738485d185 | [] | no_license | mikebentley15/sandbox | ff88ed9dc4b9ac37668142a319d0a8162e88e9e3 | 4f5869544de18be21f415a9d6f9b71c362307f27 | refs/heads/main | 2023-04-14T00:22:34.623441 | 2023-03-24T21:43:56 | 2023-03-24T21:43:56 | 116,987,549 | 6 | 3 | null | 2022-10-26T03:02:06 | 2018-01-10T17:14:54 | C++ | UTF-8 | Python | false | false | 1,521 | py | #!/usr/bin/env python3
'''
Diff two binary files byte by byte. Do not try to do insertions or deletions,
just a straight side-by-side comparison.
'''
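# Example invocation (file names are illustrative):
#   python3 bindiff.py old.bin new.bin
# prints something like "3 bytes are different".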
import sys
import argparse
def parse_args(arguments):
'Parse and return parsed arguments'
parser = argparse.ArgumentParser(
description='''
Diff two binary files byte-by-byte. This is a simple comparison
operation, so no attempts to align based on insertions or
deletions, just a straight side-by-side comparison.
''')
parser.add_argument('file1')
parser.add_argument('file2')
args = parser.parse_args(arguments)
return args
def count_byte_diffs(file1, file2):
'Return # bytes different between file1 and file2 side-by-side'
diff_bytes = 0
with open(file1, 'rb') as fin1:
with open(file2, 'rb') as fin2:
while True:
c1 = fin1.read(1)
c2 = fin2.read(1)
# Handle end of file
if c1 == bytes():
return diff_bytes + len(c2) + len(fin2.read())
if c2 == bytes():
return diff_bytes + len(c1) + len(fin1.read())
# Diff
if c1 != c2:
diff_bytes += 1
def main(arguments):
'Main logic here'
args = parse_args(arguments)
diff_bytes = count_byte_diffs(args.file1, args.file2)
print(diff_bytes, 'bytes are different')
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| [
"[email protected]"
] | |
bbf76565b6f64ab27199de1efc446badfd0ba38a | 1bad7d2b7fc920ecf2789755ed7f44b039d4134d | /ABC/173/A.py | b409f92bf2697a9ef34d7cb880b50eb8c4da2cfe | [] | no_license | kanekyo1234/AtCoder_solve | ce95caafd31f7c953c0fc699f0f4897dddd7a159 | e5ea7b080b72a2a2fd3fcb826cd10c4ab2e2720e | refs/heads/master | 2023-04-01T04:01:15.885945 | 2021-04-06T04:03:31 | 2021-04-06T04:03:31 | 266,151,065 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py | n = int(input())
if n % 1000 == 0:
print(0)
else:
print(1000-n % 1000)
| [
"[email protected]"
] | |
a0acdd42e55260598a360131a282d5f7852e0d57 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc054/D/answers/105471_s484.py | e511180c2fae7d4bf9960eae07c6d72b6540daf4 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 567 | py | N, Ma, Mb = map(int, input().split())
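# Knapsack-style DP over (amount of A, amount of B): dp[i*M + j] holds the
# minimum cost to buy items containing exactly i units of A and j units of B;
# the answer is the cheapest entry at a positive multiple of the ratio Ma:Mb.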
G = []
for i in range(N):
a, b, c = map(int, input().split())
G.append( (a,b,c) )
M = 1 + min(sum([ a for a, b, c in G ]),
sum([ b for a, b, c in G ]) )
INF = 1000000000
dp = [INF] * (M * M)
dp[0] = 0
for a, b, c in G:
i = M - a - 1
while i >= 0:
j = M - b - 1
while j >= 0:
dp[(i+a)*M+(j+b)] = min(dp[(i+a)*M+(j+b)], dp[i*M+j] + c)
j -= 1
i -= 1
ans = INF
x = 1
while Ma * x < M and Mb * x < M:
ans = min(ans, dp[Ma * x * M + Mb * x])
x += 1
if ans >= INF:
ans = -1
print(ans)
| [
"[email protected]"
] | |
1b2863c931ac97cc512170299b6e7d5844ead205 | add72f4d6f9f7af1f437d19213c14efb218b2194 | /icekit/page_types/author/tests.py | afcd6534c4231a59e310a4f281e7a4b7faa61d11 | [
"MIT"
] | permissive | ic-labs/django-icekit | 6abe859f97c709fcf51207b54778501b50436ff7 | c507ea5b1864303732c53ad7c5800571fca5fa94 | refs/heads/develop | 2022-08-08T21:26:04.144852 | 2018-01-08T02:55:17 | 2018-01-08T02:55:17 | 65,470,395 | 53 | 12 | MIT | 2022-07-06T19:59:39 | 2016-08-11T13:11:02 | Python | UTF-8 | Python | false | false | 2,128 | py | from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse
from django_dynamic_fixture import G
from django_webtest import WebTest
from fluent_contents.models import Placeholder
from . import models
User = get_user_model()
class AuthorTests(WebTest):
def setUp(self):
self.staff_1 = User.objects.create(
email='[email protected]',
is_staff=True,
is_active=True,
is_superuser=True,
)
# used to make the author's URL
self.author_listing = models.AuthorListing.objects.create(
author=self.staff_1,
title="Authors",
slug="authors",
)
self.author_1 = G(models.Author)
self.author_2 = G(models.Author)
def test_get_absolute_url(self):
self.assertEqual(
self.author_1.get_absolute_url(),
'/authors/%s/' % (
self.author_1.slug
)
)
def test_admin(self):
admin_app_list = (
('icekit_authors_author', self.author_1),
)
for admin_app, obj in admin_app_list:
response = self.app.get(
reverse('admin:%s_changelist' % admin_app),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(reverse('admin:%s_add' % admin_app), user=self.staff_1)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_history' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_delete' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
response = self.app.get(
reverse('admin:%s_change' % admin_app, args=(obj.id,)),
user=self.staff_1
)
self.assertEqual(response.status_code, 200)
| [
"[email protected]"
] | |
f0de2600bd1e07a39f42bbe91fae645d210e66f7 | e489172f6e49e1239db56c047a78a29a6ffc0b36 | /via_account_taxform/account_tax.py | b1245ef236db95e377f0531f854b5c0588e892e0 | [] | no_license | eksotama/prln-via-custom-addons | f05d0059353ae1de89ccc8d1625a896c0215cfc7 | f2b44a8af0e7bee87d52d258fca012bf44ca876f | refs/heads/master | 2020-03-25T19:49:08.117628 | 2015-12-01T07:29:43 | 2015-12-01T07:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,478 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2013 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import fields, osv
class account_tax(osv.osv):
_inherit = "account.tax"
def _get_tax_category(self, cr, uid, context=None):
res = self.pool.get('code.decode').get_company_selection_for_category(cr, uid, 'via_account_taxform', 'tax_category', context=context)
return res
_columns = {
'tax_category': fields.selection(_get_tax_category, 'Tax Category'),
}
account_tax()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"aero@aero.(none)"
] | aero@aero.(none) |
a2514f32e71a028a6e1421e5456b756a92898f22 | ccfc0566cd646cbe1837affef08baec8cd245d3b | /src/robot/model/control.py | bafd41a03b8be08ca4906403ab2cae40e68f9a71 | [
"Apache-2.0",
"CC-BY-3.0"
] | permissive | bmalhi/robotframework | 9f395d3197cdd7925b8def3aeb50b14fc31e83e2 | eaadffabc98b587c108cc904e0e54ce368020dd7 | refs/heads/master | 2023-03-01T14:30:23.110935 | 2021-02-05T16:01:30 | 2021-02-05T16:02:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,750 | py | # Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.utils import setter, py3to2
from .body import Body, BodyItem
from .keyword import Keywords
@py3to2
@Body.register
class For(BodyItem):
type = BodyItem.FOR_TYPE
body_class = Body
repr_args = ('variables', 'flavor', 'values')
__slots__ = ['variables', 'flavor', 'values']
deprecate_keyword_attributes = True
def __init__(self, variables=(), flavor='IN', values=(), parent=None):
self.variables = variables
self.flavor = flavor
self.values = values
self.parent = parent
self.body = None
@setter
def body(self, body):
return self.body_class(self, body)
@property
def keywords(self):
"""Deprecated since Robot Framework 4.0. Use :attr:`body` instead."""
return Keywords(self, self.body)
@keywords.setter
def keywords(self, keywords):
Keywords.raise_deprecation_error()
@property
def source(self):
return self.parent.source if self.parent is not None else None
def visit(self, visitor):
visitor.visit_for(self)
def __str__(self):
variables = ' '.join(self.variables)
values = ' '.join(self.values)
return u'FOR %s %s %s' % (variables, self.flavor, values)
@py3to2
@Body.register
class If(BodyItem):
body_class = Body
repr_args = ('condition', 'type')
__slots__ = ['condition', 'type', '_orelse']
deprecate_keyword_attributes = True
def __init__(self, condition=None, type=BodyItem.IF_TYPE, parent=None):
self.condition = condition
self.type = type
self.parent = parent
self.body = None
self._orelse = None
@setter
def body(self, body):
return self.body_class(self, body)
@property # Cannot use @setter because it would create orelses recursively.
def orelse(self):
if self._orelse is None and self:
self._orelse = type(self)(type=None, parent=self)
return self._orelse
@orelse.setter
def orelse(self, orelse):
if orelse is None:
self._orelse = None
elif not isinstance(orelse, type(self)):
raise TypeError("Only %s objects accepted, got %s."
% (type(self).__name__, type(orelse).__name__))
else:
orelse.parent = self
self._orelse = orelse
@property
def source(self):
return self.parent.source if self.parent is not None else None
def config(self, **attributes):
BodyItem.config(self, **attributes)
if self.type is None:
self.type = self.ELSE_IF_TYPE if self.condition else self.ELSE_TYPE
return self
def visit(self, visitor):
if self:
visitor.visit_if(self)
def __str__(self):
if not self:
return u'None'
if not isinstance(self.parent, If):
return u'IF %s' % self.condition
if self.condition:
return u'ELSE IF %s' % self.condition
return u'ELSE'
def __bool__(self):
return self.type is not None
| [
"[email protected]"
] | |
c253c273fb8c240f6eda595e492a460e88fd798c | ebdb33a86794a779714318f8a0b8397c3d6002b5 | /processing/state_processing_dataframe.py | 04a3eff325b03a22706837096af11c0d1c700efc | [] | no_license | ShabbirHasan1/interactive-corporate-report | b52c6c404a3b2f96f27b3770b7086a59400fb74a | ce0d81ab775ded84334ce599950dae9adaa978c5 | refs/heads/master | 2023-05-16T17:15:52.651033 | 2020-02-20T05:32:46 | 2020-02-20T05:32:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,326 | py | import plotly.plotly as py
import pandas as pd
import numpy as np
import os
import _pickle as pickle
# rd = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv')
my_path = os.path.abspath(os.path.dirname('__file__'))
path_in_file = os.path.join(my_path, "../data/google/addresses_google.csv")
df_own = pd.read_csv(path_in_file)
path_in_ngrams = os.path.join(my_path, "../data/cpickle/")
df_own = df_own[df_own["country"] == "United States"].reset_index(drop=True)
df_own = df_own.fillna(df_own.mean())
df_own = df_own.rename(columns={"city_long": "state", "city_short": "code", "Female": "Female Rating"
, "Male": "Male Rating", "Patrons": "Patrons Rating", 'Average Customer Network': 'Connectedness',
"Male": "Male Rating", "Patrons": "Patrons Rating",
'Food Aestheticist': 'Food Aestheticist Rating',
'High Network': 'High Network Rating', 'Low Network': 'Low Network Rating',
'Connoisseur': 'Connoisseur Rating'})
df_own.loc[:, ["Total Network", "Number of Reviewers"]] = df_own.loc[:,
["Total Network", "Number of Reviewers"]].applymap(np.int32)
df_own.loc[:, ['Male to Female', 'Foreign to Local',
'Male Rating', 'Female Rating', 'Local', 'Foreign',
'High Network Rating', 'Low Network Rating', 'Connoisseur Rating',
'Food Aestheticist Rating', 'Patrons Rating', 'First Visit',
'Visual Importance', 'Female Importance', 'Foreign Importance',
'Connectedness', 'Average Rating']] = df_own.loc[:, ['Male to Female', 'Foreign to Local',
'Male Rating', 'Female Rating', 'Local', 'Foreign',
'High Network Rating', 'Low Network Rating',
'Connoisseur Rating',
'Food Aestheticist Rating', 'Patrons Rating',
'First Visit',
'Visual Importance', 'Female Importance',
'Foreign Importance',
'Connectedness', 'Average Rating']].applymap(
np.float32).round(3)
df_own = df_own.replace({'county': {'Anchorage': 'Anchorage Borough', 'Fairbanks North Star': 'Fairbanks North Star Borough',
                                    'Matanuska-Susitna': 'Matanuska-Susitna Borough'}})
df_own["county_state"] = df_own["county"] + ", " + df_own["code"]
us = df_own[df_own["country"] == "United States"].reset_index(drop=True)
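# State-level aggregate over all firms: "Total Network" and "Number of Reviewers"
# are summed per state code, every other metric is averaged.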
sep = us[["Total Network", "Number of Reviewers", "code"]]
us = us.drop(["Total Network", "Number of Reviewers"], axis=1)
all_firms_mean = us.groupby("code").mean().reset_index()
all_firms_sum = sep.groupby("code").sum().reset_index()
all_firms = pd.concat((all_firms_mean, all_firms_sum), axis=1)
all_firms.drop(["Unnamed: 0"], axis=1, inplace=True)
all_firms = all_firms.iloc[:, 1:]
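# Repeat the same per-state aggregation for each firm, keyed by firm name;
# the all-firm frame is stored under "All" and the mapping is pickled to
# all_dicts_state.p.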
all_dicts = {}
for i in df_own["target_small_name"].unique():
firm_lvl = df_own[df_own["target_small_name"] == i].reset_index()
sep_fir = firm_lvl[["Total Network", "Number of Reviewers", "code"]]
firm_lvl = firm_lvl.drop(["Total Network", "Number of Reviewers"], axis=1)
firm_lvl = firm_lvl.groupby("code").mean().reset_index()
sep_fir = sep_fir.groupby("code").sum().reset_index()
firms = pd.concat((firm_lvl, sep_fir), axis=1)
firms.drop(["index", "Unnamed: 0"], axis=1, inplace=True)
firms = firms.iloc[:, 1:]
all_dicts[i] = firms
all_dicts["All"] = all_firms
pickle.dump(all_dicts, open(path_in_ngrams + "all_dicts_state.p", "wb"))
# go = input_fields["short_name"].tolist()
# go.append("All")
# [dict(args=['z', value["Female"] ], label=key, method='restyle') for key, value in all_dicts.items()]
# updatemenus=list([dict(buttons = [[dict(args=['z', value["Female"] ], label=key, method='update') for key, value in all_dicts.items()]])]) | [
"[email protected]"
] | |
062ad9dc571345a7f470daa1d624c95fab71381b | 8f21513b8ba9e583246908006cac98e5e473e245 | /2_date_time.py | 4e845b6d46313ee7c1aa2f575d6f4ed98c8d58b0 | [] | no_license | MatsakB/Lesson3 | 6335bdeb86e3216e4576c170096d7bcf57ec4b2d | 69493f147720ac7d975421d7400e6964ecfac3a2 | refs/heads/master | 2020-04-10T06:35:22.069664 | 2018-12-08T08:45:20 | 2018-12-08T08:45:20 | 160,859,025 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 556 | py | from datetime import datetime, timedelta
date_today = datetime.now()
delta_days = timedelta(days=1)
delta_months = timedelta(days=30)
date_yesterday = date_today-delta_days
date_month_before = date_today-delta_months
print(date_today.strftime('%d.%m.%y'))
print(date_yesterday.strftime('%d.%m.%y'))
print(date_month_before.strftime('%d.%m.%y'))
# Convert the string "01/01/17 12:10:03.234567" into a datetime object
d = "01/01/17 12:10:03.234567"
d_datetime_object = datetime.strptime(d,'%d/%m/%y %H:%M:%S.%f')
print(d_datetime_object) | [
"[email protected]"
] | |
dc768c5a2982a554ecbde52148df68d05e357efd | b167407960a3b69b16752590def1a62b297a4b0c | /tools/project-creator/Python2.6.6/Lib/test/test_transformer.py | eb8ef8ab031ce6e160d63f4fdf244baf063d4060 | [
"MIT"
] | permissive | xcode1986/nineck.ca | 543d1be2066e88a7db3745b483f61daedf5f378a | 637dfec24407d220bb745beacebea4a375bfd78f | refs/heads/master | 2020-04-15T14:48:08.551821 | 2019-01-15T07:36:06 | 2019-01-15T07:36:06 | 164,768,581 | 1 | 1 | MIT | 2019-01-15T08:30:27 | 2019-01-09T02:09:21 | C++ | UTF-8 | Python | false | false | 1,146 | py | import unittest
from test import test_support
# Silence Py3k warning
test_support.import_module('compiler', deprecated=True)
from compiler import transformer, ast
from compiler import compile
class Tests(unittest.TestCase):
def testMultipleLHS(self):
""" Test multiple targets on the left hand side. """
snippets = ['a, b = 1, 2',
'(a, b) = 1, 2',
'((a, b), c) = (1, 2), 3']
for s in snippets:
a = transformer.parse(s)
assert isinstance(a, ast.Module)
child1 = a.getChildNodes()[0]
assert isinstance(child1, ast.Stmt)
child2 = child1.getChildNodes()[0]
assert isinstance(child2, ast.Assign)
# This actually tests the compiler, but it's a way to assure the ast
# is correct
c = compile(s, '<string>', 'single')
vals = {}
exec c in vals
assert vals['a'] == 1
assert vals['b'] == 2
def test_main():
test_support.run_unittest(Tests)
if __name__ == "__main__":
test_main()
| [
"[email protected]"
] | |
f144fec17ff955f0806f4a5f976eb5a2072ff5dc | 92ae735d5dc6f6a094daedbd32614e714d0b8c4a | /newsletter/settings.py | 9ab91d32d35e7f7387b06c550e337e75ac0022a6 | [
"MIT"
] | permissive | Williano/Final-Senior-Year-Project- | 3b01ac9fd85753720b01c2245cf9b71648aad35d | 4bd988575537b37b5cf852b616d3db5666c95e7f | refs/heads/master | 2023-08-07T16:11:42.778492 | 2023-06-05T04:59:06 | 2023-06-05T04:59:06 | 121,346,340 | 173 | 60 | MIT | 2023-06-05T04:59:07 | 2018-02-13T06:17:16 | Python | UTF-8 | Python | false | false | 3,167 | py | from importlib import import_module
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from .utils import Singleton
class Settings(object):
"""
A settings object that proxies settings and handles defaults, inspired
by `django-appconf` and the way it works in `django-rest-framework`.
By default, a single instance of this class is created as `<app>_settings`,
from which `<APP>_SETTING_NAME` can be accessed as `SETTING_NAME`, i.e.::
from myapp.settings import myapp_settings
if myapp_settings.SETTING_NAME:
# DO FUNKY DANCE
If a setting has not been explicitly defined in Django's settings, defaults
can be specified as `DEFAULT_SETTING_NAME` class variable or property.
"""
__metaclass__ = Singleton
def __init__(self):
"""
Assert app-specific prefix.
"""
assert hasattr(self, 'settings_prefix'), 'No prefix specified.'
def __getattr__(self, attr):
"""
Return Django setting `PREFIX_SETTING` if explicitly specified,
otherwise return `PREFIX_SETTING_DEFAULT` if specified.
"""
if attr.isupper():
# Require settings to have uppercase characters
try:
setting = getattr(
django_settings,
'%s_%s' % (self.settings_prefix, attr),
)
except AttributeError:
if not attr.startswith('DEFAULT_'):
setting = getattr(self, 'DEFAULT_%s' % attr)
else:
raise
return setting
else:
# Default behaviour
raise AttributeError(
'No setting or default available for \'%s\'' % attr
)
class NewsletterSettings(Settings):
""" Django-newsletter specific settings. """
settings_prefix = 'NEWSLETTER'
DEFAULT_CONFIRM_EMAIL = True
@property
def DEFAULT_CONFIRM_EMAIL_SUBSCRIBE(self):
return self.CONFIRM_EMAIL
@property
def DEFAULT_CONFIRM_EMAIL_UNSUBSCRIBE(self):
return self.CONFIRM_EMAIL
@property
def DEFAULT_CONFIRM_EMAIL_UPDATE(self):
return self.CONFIRM_EMAIL
@property
def RICHTEXT_WIDGET(self):
# Import and set the richtext field
NEWSLETTER_RICHTEXT_WIDGET = getattr(
django_settings, "NEWSLETTER_RICHTEXT_WIDGET", ""
)
if NEWSLETTER_RICHTEXT_WIDGET:
try:
module, attr = NEWSLETTER_RICHTEXT_WIDGET.rsplit(".", 1)
mod = import_module(module)
return getattr(mod, attr)
except Exception as e:
# Catch ImportError and other exceptions too
# (e.g. user sets setting to an integer)
raise ImproperlyConfigured(
"Error while importing setting "
"NEWSLETTER_RICHTEXT_WIDGET %r: %s" % (
NEWSLETTER_RICHTEXT_WIDGET, e
)
)
return None
newsletter_settings = NewsletterSettings()
| [
"[email protected]"
] | |
ff3771b5c0f1674a5c668a6a8e2b5f78017cab70 | d83fde3c891f44014f5339572dc72ebf62c38663 | /_bin/google-cloud-sdk/.install/.backup/lib/googlecloudsdk/api_lib/auth/exceptions.py | b07bd1f9d71b922ba849cb4dc2a0eac9562fdd33 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | gyaresu/dotfiles | 047cc3ca70f4b405ba272856c69ee491a79d2ebe | e5e533b3a081b42e9492b228f308f6833b670cfe | refs/heads/master | 2022-11-24T01:12:49.435037 | 2022-11-01T16:58:13 | 2022-11-01T16:58:13 | 17,139,657 | 1 | 1 | null | 2020-07-25T14:11:43 | 2014-02-24T14:59:59 | Python | UTF-8 | Python | false | false | 1,206 | py | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""User errors raised by auth commands."""
from __future__ import absolute_import
from __future__ import unicode_literals
from googlecloudsdk.core import exceptions
class AuthenticationError(exceptions.Error):
"""Raised for errors reported by Oauth2client library."""
class InvalidCredentialsError(exceptions.Error):
"""Raised if credentials are not usable."""
class WrongAccountError(exceptions.Error):
"""Raised when credential account does not match expected account."""
class GitCredentialHelperError(exceptions.Error):
"""Raised for issues related to passing auth credentials to Git."""
| [
"[email protected]"
] | |
a2859f4ac719600fa16c18391c0265afda7857f5 | 238e46a903cf7fac4f83fa8681094bf3c417d22d | /VTK/vtk_7.1.1_x64_Release/lib/python2.7/site-packages/twisted/scripts/test/test_scripts.py | 8705a90f7287a1f2a71871bea4ec36b22d67d56a | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | baojunli/FastCAE | da1277f90e584084d461590a3699b941d8c4030b | a3f99f6402da564df87fcef30674ce5f44379962 | refs/heads/master | 2023-02-25T20:25:31.815729 | 2021-02-01T03:17:33 | 2021-02-01T03:17:33 | 268,390,180 | 1 | 0 | BSD-3-Clause | 2020-06-01T00:39:31 | 2020-06-01T00:39:31 | null | UTF-8 | Python | false | false | 6,855 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for the command-line scripts in the top-level I{bin/} directory.
Tests for actual functionality belong elsewhere, written in a way that doesn't
involve launching child processes.
"""
from os import devnull, getcwd, chdir
from sys import executable
from subprocess import PIPE, Popen
from twisted.trial.unittest import SkipTest, TestCase
from twisted.python.modules import getModule
from twisted.python.filepath import FilePath
from twisted.python.test.test_shellcomp import ZshScriptTestMixin
def outputFromPythonScript(script, *args):
"""
Synchronously run a Python script, with the same Python interpreter that
ran the process calling this function, using L{Popen}, using the given
command-line arguments, with standard input and standard error both
redirected to L{os.devnull}, and return its output as a string.
@param script: The path to the script.
@type script: L{FilePath}
@param args: The command-line arguments to follow the script in its
invocation (the desired C{sys.argv[1:]}).
@type args: L{tuple} of L{str}
@return: the output passed to the proces's C{stdout}, without any messages
from C{stderr}.
@rtype: L{bytes}
"""
nullInput = file(devnull, "rb")
nullError = file(devnull, "wb")
stdout = Popen([executable, script.path] + list(args),
stdout=PIPE, stderr=nullError, stdin=nullInput).stdout.read()
nullInput.close()
nullError.close()
return stdout
class ScriptTestsMixin:
"""
Mixin for L{TestCase} subclasses which defines a helper function for testing
a Twisted-using script.
"""
bin = getModule("twisted").pathEntry.filePath.child("bin")
def scriptTest(self, name):
"""
Verify that the given script runs and uses the version of Twisted
currently being tested.
This only works when running tests against a vcs checkout of Twisted,
since it relies on the scripts being in the place they are kept in
version control, and exercises their logic for finding the right version
of Twisted to use in that situation.
@param name: A path fragment, relative to the I{bin} directory of a
Twisted source checkout, identifying a script to test.
@type name: C{str}
@raise SkipTest: if the script is not where it is expected to be.
"""
script = self.bin.preauthChild(name)
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
from twisted.copyright import version
scriptVersion = outputFromPythonScript(script, '--version')
self.assertIn(str(version), scriptVersion)
class ScriptTests(TestCase, ScriptTestsMixin):
"""
Tests for the core scripts.
"""
def test_twistd(self):
self.scriptTest("twistd")
def test_twistdPathInsert(self):
"""
The twistd script adds the current working directory to sys.path so
that it's able to import modules from it.
"""
script = self.bin.child("twistd")
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
cwd = getcwd()
self.addCleanup(chdir, cwd)
testDir = FilePath(self.mktemp())
testDir.makedirs()
chdir(testDir.path)
testDir.child("bar.tac").setContent(
"import sys\n"
"print sys.path\n")
output = outputFromPythonScript(script, '-ny', 'bar.tac')
self.assertIn(repr(testDir.path), output)
def test_manhole(self):
self.scriptTest("manhole")
def test_trial(self):
self.scriptTest("trial")
def test_trialPathInsert(self):
"""
The trial script adds the current working directory to sys.path so that
it's able to import modules from it.
"""
script = self.bin.child("trial")
if not script.exists():
raise SkipTest(
"Script tests do not apply to installed configuration.")
cwd = getcwd()
self.addCleanup(chdir, cwd)
testDir = FilePath(self.mktemp())
testDir.makedirs()
chdir(testDir.path)
testDir.child("foo.py").setContent("")
output = outputFromPythonScript(script, 'foo')
self.assertIn("PASSED", output)
def test_pyhtmlizer(self):
self.scriptTest("pyhtmlizer")
def test_tap2rpm(self):
self.scriptTest("tap2rpm")
def test_tap2deb(self):
self.scriptTest("tap2deb")
def test_tapconvert(self):
self.scriptTest("tapconvert")
def test_deprecatedTkunzip(self):
"""
The entire L{twisted.scripts.tkunzip} module, part of the old Windows
installer tool chain, is deprecated.
"""
from twisted.scripts import tkunzip
warnings = self.flushWarnings(
offendingFunctions=[self.test_deprecatedTkunzip])
self.assertEqual(DeprecationWarning, warnings[0]['category'])
self.assertEqual(
"twisted.scripts.tkunzip was deprecated in Twisted 11.1.0: "
"Seek unzipping software outside of Twisted.",
warnings[0]['message'])
self.assertEqual(1, len(warnings))
def test_deprecatedTapconvert(self):
"""
The entire L{twisted.scripts.tapconvert} module is deprecated.
"""
from twisted.scripts import tapconvert
warnings = self.flushWarnings(
offendingFunctions=[self.test_deprecatedTapconvert])
self.assertEqual(DeprecationWarning, warnings[0]['category'])
self.assertEqual(
"twisted.scripts.tapconvert was deprecated in Twisted 12.1.0: "
"tapconvert has been deprecated.",
warnings[0]['message'])
self.assertEqual(1, len(warnings))
class ZshIntegrationTestCase(TestCase, ZshScriptTestMixin):
"""
Test that zsh completion functions are generated without error
"""
generateFor = [('twistd', 'twisted.scripts.twistd.ServerOptions'),
('trial', 'twisted.scripts.trial.Options'),
('pyhtmlizer', 'twisted.scripts.htmlizer.Options'),
('tap2rpm', 'twisted.scripts.tap2rpm.MyOptions'),
('tap2deb', 'twisted.scripts.tap2deb.MyOptions'),
('tapconvert', 'twisted.scripts.tapconvert.ConvertOptions'),
('manhole', 'twisted.scripts.manhole.MyOptions')
]
| [
"l”[email protected]“"
] | |
70bc331f3ab7dcdf0904d00a928becf959b12a5e | f3bd271bf00325881fb5b2533b9ef7f7448a75ec | /xcp2k/classes/_wf_correlation1.py | 946851bae269b8bc3790a4acf9f9905235827943 | [] | no_license | obaica/xcp2k | 7f99fc9d494859e16b9b0ea8e217b0493f4b2f59 | 6e15c2c95658f545102595dc1783f5e03a9e6916 | refs/heads/master | 2020-07-15T17:27:43.378835 | 2019-02-11T16:32:24 | 2019-02-11T16:32:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,305 | py | from xcp2k.inputsection import InputSection
from _mp2_info1 import _mp2_info1
from _direct_canonical1 import _direct_canonical1
from _wfc_gpw1 import _wfc_gpw1
from _ri_mp21 import _ri_mp21
from _opt_ri_basis1 import _opt_ri_basis1
from _ri_rpa1 import _ri_rpa1
from _ri_laplace1 import _ri_laplace1
from _cphf1 import _cphf1
from _interaction_potential3 import _interaction_potential3
from _eri_mme2 import _eri_mme2
class _wf_correlation1(InputSection):
def __init__(self):
InputSection.__init__(self)
self.Method = None
self.Memory = None
self.Scale_s = None
self.Scale_t = None
self.Group_size = None
self.Row_block = None
self.Col_block = None
self.Calc_cond_num = None
self.Ri_metric = None
self.Eri_method = None
self.Eri_blksize = None
self.Minimal_gap = None
self.MP2_INFO = _mp2_info1()
self.DIRECT_CANONICAL = _direct_canonical1()
self.WFC_GPW = _wfc_gpw1()
self.RI_MP2 = _ri_mp21()
self.OPT_RI_BASIS = _opt_ri_basis1()
self.RI_RPA = _ri_rpa1()
self.RI_LAPLACE = _ri_laplace1()
self.CPHF = _cphf1()
self.INTERACTION_POTENTIAL = _interaction_potential3()
self.ERI_MME = _eri_mme2()
self._name = "WF_CORRELATION"
self._keywords = {'Minimal_gap': 'MINIMAL_GAP', 'Group_size': 'GROUP_SIZE', 'Row_block': 'ROW_BLOCK', 'Calc_cond_num': 'CALC_COND_NUM', 'Scale_s': 'SCALE_S', 'Scale_t': 'SCALE_T', 'Memory': 'MEMORY', 'Eri_method': 'ERI_METHOD', 'Col_block': 'COL_BLOCK', 'Method': 'METHOD', 'Eri_blksize': 'ERI_BLKSIZE', 'Ri_metric': 'RI_METRIC'}
self._subsections = {'MP2_INFO': 'MP2_INFO', 'RI_RPA': 'RI_RPA', 'WFC_GPW': 'WFC_GPW', 'RI_LAPLACE': 'RI_LAPLACE', 'RI_MP2': 'RI_MP2', 'CPHF': 'CPHF', 'INTERACTION_POTENTIAL': 'INTERACTION_POTENTIAL', 'OPT_RI_BASIS': 'OPT_RI_BASIS', 'ERI_MME': 'ERI_MME', 'DIRECT_CANONICAL': 'DIRECT_CANONICAL'}
self._aliases = {'Row_block_size': 'Row_block', 'Number_proc': 'Group_size', 'Col_block_size': 'Col_block', 'Calc_condition_number': 'Calc_cond_num', 'Ri': 'Ri_metric'}
@property
def Number_proc(self):
"""
See documentation for Group_size
"""
return self.Group_size
@property
def Row_block_size(self):
"""
See documentation for Row_block
"""
return self.Row_block
@property
def Col_block_size(self):
"""
See documentation for Col_block
"""
return self.Col_block
@property
def Calc_condition_number(self):
"""
See documentation for Calc_cond_num
"""
return self.Calc_cond_num
@property
def Ri(self):
"""
See documentation for Ri_metric
"""
return self.Ri_metric
@Number_proc.setter
def Number_proc(self, value):
self.Group_size = value
@Row_block_size.setter
def Row_block_size(self, value):
self.Row_block = value
@Col_block_size.setter
def Col_block_size(self, value):
self.Col_block = value
@Calc_condition_number.setter
def Calc_condition_number(self, value):
self.Calc_cond_num = value
@Ri.setter
def Ri(self, value):
self.Ri_metric = value
| [
"[email protected]"
] | |
7588e0b50d82ff81490c180db39e55febd0d85ab | f8bdc46409c9f5eaf3d85ef157260589462d941a | /demos/instance_occlsegm/examples/synthetic2d/legacy/view_arc2017_occlusion.py | 408b6b042140818b8b6f50d4e6382df6fedf1f5c | [
"MIT",
"BSD-3-Clause"
] | permissive | start-jsk/jsk_apc | 2e268f8b65e9d7f4f9cc4416dc8383fd0a7b9750 | c4e349f45ef38457dc774e33f6902acf1a1540a6 | refs/heads/master | 2023-09-05T09:06:24.855510 | 2023-09-01T17:10:12 | 2023-09-01T17:10:12 | 25,620,908 | 36 | 25 | NOASSERTION | 2023-09-01T17:10:14 | 2014-10-23T05:28:31 | Common Lisp | UTF-8 | Python | false | false | 1,604 | py | #!/usr/bin/env python
import chainer_mask_rcnn
import instance_occlsegm_lib
import contrib
if __name__ == '__main__':
dataset = contrib.datasets.ARC2017OcclusionDataset(
split='train', do_aug=True)
def visualize_func(dataset, index):
img, bboxes, labels, lbls = dataset[index]
class_names = dataset.class_names
captions = [class_names[l] for l in labels]
vizs = []
for bbox, label, lbl, caption in \
zip(bboxes, labels, lbls, captions):
mask_bg = lbl == 0
mask_visible = lbl == 1
mask_invisible = lbl == 2
viz = chainer_mask_rcnn.utils.draw_instance_bboxes(
img, [bbox], [label], n_class=len(class_names),
masks=[mask_bg], captions=[caption])
vizs.append(viz)
viz = chainer_mask_rcnn.utils.draw_instance_bboxes(
img, [bbox], [label], n_class=len(class_names),
masks=[mask_visible], captions=[caption])
vizs.append(viz)
viz = chainer_mask_rcnn.utils.draw_instance_bboxes(
img, [bbox], [label], n_class=len(class_names),
masks=[mask_invisible], captions=[caption])
vizs.append(viz)
viz = instance_occlsegm_lib.image.tile(
vizs, (max(1, len(vizs) // 3), 3))
return viz
instance_occlsegm_lib.datasets.view_dataset(dataset, visualize_func)
# viz = instance_occlsegm_lib.image.resize(viz, size=1000 * 1000)
# instance_occlsegm_lib.io.imshow(viz)
# instance_occlsegm_lib.io.waitkey()
| [
"[email protected]"
] | |
3e7af994a6235be22aa1a34320c806ffcc69e7cd | 0ca1d8363439e0e34d7eaa54f021ff0b2940cac7 | /facturacion/migrations/0016_auto_20181130_1741.py | 8521ce266772004f3ca7a4e556672f389672ca4b | [] | no_license | geovanniberdugo/medhis | d6b606ef2c391738ee5fa4209712b6c0eb01ae40 | b8f8df111432bfab537853ed8e8dbd4603e9707d | refs/heads/main | 2023-02-13T19:44:33.699689 | 2021-01-15T22:08:35 | 2021-01-15T22:08:35 | 330,032,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 520 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-11-30 22:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facturacion', '0015_auto_20181120_1824'),
]
operations = [
migrations.AlterField(
model_name='caja',
name='verificacion_correcta',
field=models.BooleanField(default=True, help_text='Indica si los valores son correctos'),
),
]
| [
"[email protected]"
] | |
00f502880899ebc4ff3c56f1b131f1ba2ae7846c | 0e25538b2f24f1bc002b19a61391017c17667d3d | /xsharepoint/win_xspmanagedpath.py | 527691433ba4407838d8a7cd0c78122a6cfd4090 | [] | no_license | trondhindenes/Ansible-Auto-Generated-Modules | 725fae6ba9b0eef00c9fdc21179e2500dfd6725f | efa6ac8cd2b545116f24c1929936eb8cc5c8d337 | refs/heads/master | 2020-04-06T09:21:00.756651 | 2016-10-07T07:08:29 | 2016-10-07T07:08:29 | 36,883,816 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,584 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# <COPYRIGHT>
# <CODEGENMETA>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
DOCUMENTATION = '''
---
module: win_xspmanagedpath
version_added:
short_description: Generated from DSC module xsharepoint version 0.12.0.0 at 07.10.2016 02.56.36
description:
- This DSC module is used to deploy and configure SharePoint Server 2013, and convers a wide range of areas including web apps, service apps and farm configuration.
options:
Explicit:
description:
-
required: True
default:
aliases: []
HostHeader:
description:
-
required: True
default:
aliases: []
RelativeUrl:
description:
-
required: True
default:
aliases: []
WebAppUrl:
description:
-
required: True
default:
aliases: []
InstallAccount_username:
description:
-
required: False
default:
aliases: []
InstallAccount_password:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_username:
description:
-
required: False
default:
aliases: []
PsDscRunAsCredential_password:
description:
-
required: False
default:
aliases: []
AutoInstallModule:
description:
- If true, the required dsc resource/module will be auto-installed using the Powershell package manager
required: False
default: false
aliases: []
choices:
- true
- false
AutoConfigureLcm:
description:
- If true, LCM will be auto-configured for directly invoking DSC resources (which is a one-time requirement for Ansible DSC modules)
required: False
default: false
aliases: []
choices:
- true
- false
| [
"[email protected]"
] | |
d454ee79742bbf7b555240e05ae0700d83559c75 | 29fad6273eb43fcbaff7460b2b68fea66d9ebc8c | /custom-vpc/custom_vpc/custom_vpc_stack.py | 99ef9dd2bd53bf2c57edf14bd1ebc1b98829cff7 | [] | no_license | satishbr/cdk-demos | a3b2c7ca32551eb1c0264f8125f2ffc9413d00ff | 14568c885322f561d548de0d1175f3b60ee87df0 | refs/heads/master | 2022-11-10T16:58:38.105403 | 2020-04-15T10:20:19 | 2020-04-15T10:20:19 | 275,331,921 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | from aws_cdk import (
aws_ec2 as ec2,
core
)
class CustomVpcStack(core.Stack):
def __init__(self, scope: core.Construct, id: str, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
# The code that defines your stack goes here
# https://docs.aws.amazon.com/cdk/api/latest/python/aws_cdk.aws_ec2/Vpc.html
vpc = ec2.Vpc(
self, "MyVpc",
cidr="10.13.0.0/21",
max_azs=2,
nat_gateways=0,
subnet_configuration=[
ec2.SubnetConfiguration(name="public", cidr_mask=24, subnet_type=ec2.SubnetType.PUBLIC),
# ec2.SubnetConfiguration(name="private", cidr_mask=24, subnet_type=ec2.SubnetType.PRIVATE)
ec2.SubnetConfiguration(name="private", cidr_mask=24, subnet_type=ec2.SubnetType.ISOLATED)
]
)
# Tag all VPC Resources
core.Tag.add(vpc,key="Owner",value="KonStone",include_resource_types=[])
| [
"[email protected]"
] | |
96d3ef7124f1d20922a37c482305578a536be494 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/video/QVI_ID2930_for_PyTorch/datas/__init__.py | 3f2183896479f5bf7d870a252d3f4a6d07fccc07 | [
"GPL-1.0-or-later",
"Apache-2.0",
"MIT",
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 822 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from .Adobe240all import Adobe240all
from .QVI960 import QVI960
from .AIMSequence import AIMSequence
__all__ = ['Adobe240all', 'QVI960', 'AIMSequence',]
| [
"[email protected]"
] | |
84187813982783ed1d30663de5146be4acf7ac64 | 98e4686742146ec557a6c61a83540ca69f27f077 | /classification/Net/ResNetHeatMap/generate_heatmap.py | 399c3b1171a546f4d3636d38bd41260b4485603d | [] | no_license | UpCoder/Secret | b862c91d5229f7ceaa787475e5fc01f349a8cb98 | 15a9b7c50eaf4a5f4fd4856446d51fa4d7917c88 | refs/heads/master | 2021-07-09T19:13:41.816810 | 2017-10-10T04:16:30 | 2017-10-10T04:16:30 | 105,138,942 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,013 | py | # # -*- coding=utf-8 -*-
# # Generate probability (heat) maps using our trained model
# from tools.image_operations import extract_patchs_return
# import tensorflow as tf
# from net_config import Net_Config as net_config
# from resnet import inference
# import numpy as np
# import sys
# import math
# from PIL import Image
# import os
# MOMENTUM = 0.9
#
# FLAGS = tf.app.flags.FLAGS
# tf.app.flags.DEFINE_string('train_dir', '/tmp/resnet_train',
# """Directory where to write event logs """
# """and checkpoint.""")
# tf.app.flags.DEFINE_string('save_model_dir', './models', 'the path using to save model')
# tf.app.flags.DEFINE_float('learning_rate', 0.01, "learning rate.")
# tf.app.flags.DEFINE_integer('batch_size', net_config.BATCH_SIZE, "batch size")
# tf.app.flags.DEFINE_integer('max_steps', 500000, "max steps")
# tf.app.flags.DEFINE_boolean('resume', True,
# 'resume from latest saved state')
# # def train(logits, label_value, image_pathes):
# # from image_processing import image_preprocessing
# # filenames = image_pathes
# # labels = label_value
# # filename, label = tf.train.slice_input_producer([filenames, labels], shuffle=True)
# # num_process_threads = 4
# # images_and_labels = []
# # for thread_id in range(num_process_threads):
# # image_buffer = tf.read_file(filename)
# # bbox = []
# # image = image_preprocessing(
# # image_buffer,
# # bbox=bbox,
# # train=False,
# # thread_id=thread_id
# # )
# # # image = tf.image.rgb_to_hsv(image)
# # images_and_labels.append([image, label])
# # batch_image, batch_label = tf.train.batch_join(
# # images_and_labels,
# # batch_size=FLAGS.batch_size,
# # capacity=2 * num_process_threads * FLAGS.batch_size
# # )
# # height = net_config.IMAGE_W
# # width = net_config.IMAGE_H
# # depth = 3
# #
# # images = tf.cast(batch_image, tf.float32)
# # images = tf.reshape(images, shape=[FLAGS.batch_size, height, width, depth])
# #
# #
# # print 'image shape is ', np.shape(images)
# # logits = inference(images,
# # num_classes=2,
# # is_training=False,
# # bottleneck=False)
# # global_step = tf.get_variable('global_step', [],
# # initializer=tf.constant_initializer(0),
# # trainable=False)
# # val_step = tf.get_variable('val_step', [],
# # initializer=tf.constant_initializer(0),
# # trainable=False)
# # predictions = tf.nn.softmax(logits)
# #
# # saver = tf.train.Saver(tf.all_variables())
# #
# # init = tf.global_variables_initializer()
# #
# # sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
# # sess.run(init)
# # sess.run(tf.initialize_local_variables())
# # tf.train.start_queue_runners(sess=sess)
# # print images.eval(session=sess)
# # if FLAGS.resume:
# # latest = tf.train.latest_checkpoint('/home/give/PycharmProjects/StomachCanner/classification/Net/ResNet/models/instance/5500.0/')
# # if not latest:
# # print "No checkpoint to continue from in", FLAGS.train_dir
# # sys.exit(1)
# # print "resume", latest
# # saver.restore(sess, latest)
# #
# # is_training = tf.placeholder('bool', [], name='is_training')
# # predictions_values = sess.run(
# # [predictions],
# # {
# # is_training: False
# # })
# # print predictions_values
# # predictions_values = np.argmax(predictions_values, axis=1)
# # print predictions_values
# '''
# Get classification results from the heat maps of the test set
# '''
# def get_classification_result(image_dir):
# names = os.listdir(image_dir)
# image_pathes = [os.path.join(image_dir, name) for name in names]
# filenames = image_pathes
# print image_pathes
# [filename] = tf.train.slice_input_producer([filenames], shuffle=False, num_epochs=1)
# num_process_threads = 4
# images_and_labels = []
# from image_processing import image_preprocessing
# for thread_id in range(num_process_threads):
# image_buffer = tf.read_file(filename)
# bbox = []
# image = image_preprocessing(
# image_buffer,
# bbox=bbox,
# train=False,
# thread_id=thread_id
# )
# # image = tf.image.rgb_to_hsv(image)
# images_and_labels.append([image])
# batch_image = tf.train.batch_join(
# images_and_labels,
# batch_size=1,
# capacity=2 * num_process_threads * FLAGS.batch_size
# )
# height = net_config.IMAGE_W
# width = net_config.IMAGE_H
# depth = 3
#
# images = tf.cast(batch_image, tf.float32)
# images = tf.reshape(images, shape=[1, height, width, depth])
# print images
#
# logits = inference(images,
# num_classes=2,
# is_training=True,
# bottleneck=False, )
#
# saver = tf.train.Saver(tf.all_variables())
#
# init = tf.global_variables_initializer()
#
# sess = tf.Session(config=tf.ConfigProto(log_device_placement=False))
# sess.run(init)
# sess.run(tf.initialize_local_variables())
# tf.train.start_queue_runners(sess=sess)
#
# latest = tf.train.latest_checkpoint(
# '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNetHeatMap/models/method5-512')
# if not latest:
# print "No checkpoint to continue from in", FLAGS.train_dir
# sys.exit(1)
# print "resume", latest
# saver.restore(sess, latest)
# predictions = tf.nn.softmax(logits)
# predictions_label = tf.argmax(predictions, axis=1)
# print predictions_label
# while True:
# prediction_value = sess.run(predictions_label)
# print prediction_value
# return images
#
# '''
# Classify the heat maps, returning the predicted class for each heat map
# '''
# def generate_prediction(patches):
#
# probability = tf.nn.softmax(logits)
# if FLAGS.resume:
# latest = tf.train.latest_checkpoint(
# '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNetHeatMap/models/method5-512')
# if not latest:
# print "No checkpoint to continue from in", FLAGS.train_dir
# sys.exit(1)
# print "resume", latest
# saver.restore(sess, latest)
# probability_values = []
# start = 0
# batch_size = 512
# while start < len(patches):
# end = start + batch_size
# if end >= len(patches):
# end = len(patches)
# cur_patches = patches[start:end]
# probability_value = sess.run(
# probability,
# {
# img_tensor: cur_patches
# }
# )
# # print probability_value
# probability_values.extend(probability_value)
# # print 'logits value shape is ', np.shape(probability_value)
# start = end
# probability_values = np.asarray(probability_values, np.float32)
# return np.argmax(probability_values, axis=1)
#
# '''
# Load the trained model and compute the heat map of a single tiff file
# :param tiff_path path of the tiff file
# :param save_path path to save the heat map; if None, show it instead
# '''
# def generate_heatmap(tiff_path, save_path):
# if os.path.exists(save_path):
# print 'Exists'
# return
# patches = extract_patchs_return(
# tiff_path=tiff_path,
# mask_dir=None,
# occupy_rate=None,
# stride=16,
# patch_size=256
# )
# patches = np.asarray(patches, np.float32)
# for index, patch in enumerate(patches):
# patch = np.asarray(patch, np.float32)
# patch = patch * (1.0 / np.max(patch))
# patches[index] = patch
# probability_value = generate_prediction(patches)
# print np.max(probability_value), np.min(probability_value)
# print probability_value
# w = int(math.sqrt(len(probability_value)))
# probability_img = Image.fromarray(np.asarray(np.reshape(probability_value, [w, w]) * 255, np.uint8))
# if save_path is not None:
# probability_img.save(save_path)
# else:
# probability_img.show()
#
#
# '''
# Load the trained model and compute heat maps for every tiff file in a folder
# :param tiff_dir directory containing the tiff files
# :param save_dir directory in which to save the heat maps
# '''
# def generate_heatmap_one_floder(tiff_dir, save_dir):
# names = os.listdir(tiff_dir)
# tiff_paths = [os.path.join(tiff_dir, name) for name in names]
# for index, tiff_path in enumerate(tiff_paths):
# name = names[index].split('.tiff')[0]
# generate_heatmap(tiff_path, os.path.join(save_dir, name+'.png'))
#
#
# '''
# Load the trained model and compute heat maps for all tiff files under several folders
# :param tiff_dirs paths of the folders
# :param save_dirs corresponding output paths for the folders above
#
# '''
# def generate_heatmap_multi_floder(tiff_dirs, save_dirs):
# for tiff_dir_index, tiff_dir in enumerate(tiff_dirs):
# save_dir = save_dirs[tiff_dir_index]
# names = os.listdir(tiff_dir)
# tiff_paths = [os.path.join(tiff_dir, name) for name in names]
# for index, tiff_path in enumerate(tiff_paths):
# name = names[index].split('.tiff')[0]
# generate_heatmap(tiff_path, os.path.join(save_dir, name+'.png'))
#
# if __name__ == '__main__':
# # generate_heatmap_multi_floder(
# # tiff_dirs=[
# # '/home/give/Documents/dataset/BOT_Game/val/positive',
# # '/home/give/Documents/dataset/BOT_Game/val/negative'
# # # '/home/give/Documents/dataset/BOT_Game/0-testdataset'
# # ],
# # save_dirs=[
# # # '/home/give/Documents/dataset/BOT_Game/0-testdataset-hm'
# # '/home/give/Documents/dataset/BOT_Game/val/positive-hm',
# # '/home/give/Documents/dataset/BOT_Game/val/negative-hm'
# # ]
# # )
# # from tools.image_operations import read_images
# # image_dir = '/home/give/Documents/dataset/BOT_Game/train/positive-test'
# # names = os.listdir(image_dir)
# # pathes = [os.path.join(image_dir, name) for name in names]
# # patches = read_images('/home/give/Documents/dataset/BOT_Game/train/positive-test')
# # for index, patch in enumerate(patches):
# # patch = np.asarray(patch, np.float32)
# # patch = patch * (1.0 / np.max(patch))
# # patches[index] = patch
# # print 'patch shape is ', np.shape(patches)
# # predicted = generate_prediction(patches)
# # print np.max(predicted), np.min(predicted)
# # print predicted
# # train(None, [0]*len(pathes), pathes)
# # get_classification_result('/home/give/Documents/dataset/BOT_Game/0-testdataset-hm/method5')
# get_classification_result('/home/give/Documents/dataset/BOT_Game/train/positive-hm/method5')
import tensorflow as tf
import os
from net_config import Net_Config as net_config
from resnet import inference
from DataSetBase import DataSetBase as DataSet
from image_processing import image_preprocessing
from resnet_val import val
import numpy as np
from PIL import Image
def file_list(data_dir):
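    """Return the paths listed in <data_dir>.txt (skipping entries that start
    with '.'), joined onto data_dir."""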
dir_txt = data_dir + ".txt"
filenames = []
with open(dir_txt, 'r') as f:
for line in f:
if line[0] == '.': continue
line = line.rstrip()
fn = os.path.join(data_dir, line)
filenames.append(fn)
return filenames
def distorted_inputs_unit(dataset, trainable, shuffle=True):
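    """Build a queued input pipeline: read every image file in the dataset,
    preprocess it with image_preprocessing, and batch to net_config.BATCH_SIZE,
    returning image and label tensors reshaped to the network input size."""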
filenames = dataset.images_names
labels = dataset.labels
filename, label = tf.train.slice_input_producer([filenames, labels], shuffle=shuffle)
num_process_threads = 4
images_and_labels = []
for thread_id in range(num_process_threads):
image_buffer = tf.read_file(filename)
bbox = []
image = image_preprocessing(
image_buffer,
bbox=bbox,
train=trainable,
thread_id=thread_id
)
# image = tf.image.rgb_to_hsv(image)
images_and_labels.append([image, label])
batch_image, batch_label = tf.train.batch_join(
images_and_labels,
batch_size=net_config.BATCH_SIZE,
capacity=2*num_process_threads*net_config.BATCH_SIZE
)
height = net_config.IMAGE_W
width = net_config.IMAGE_H
depth = 3
images = tf.cast(batch_image, tf.float32)
images = tf.reshape(images, shape=[net_config.BATCH_SIZE, height, width, depth])
return images, tf.reshape(batch_label, [net_config.BATCH_SIZE])
def distorted_inputs():
# data = load_data(FLAGS.data_dir)
# filenames = [ d['filename'] for d in data ]
# label_indexes = [ d['label_index'] for d in data ]
# train_positive_path = '/home/give/Documents/dataset/BOT_Game/train/positive-png'
# train_negative_path = '/home/give/Documents/dataset/BOT_Game/train/negative-copy'
# val_positive_path = '/home/give/Documents/dataset/BOT_Game/val/positive-png'
# val_negative_path = '/home/give/Documents/dataset/BOT_Game/val/negative-png'
train_positive_path = '/home/give/Documents/dataset/BOT_Game/train/negative-hm/method6'
train_negative_path = '/home/give/Documents/dataset/BOT_Game/train/negative-hm/method6'
val_positive_path = '/home/give/Documents/dataset/BOT_Game/val/positive-hm/method5'
val_negative_path = '/home/give/Documents/dataset/BOT_Game/val/negative-hm/method5'
val_dataset = DataSet(
positive_path=val_positive_path,
negative_path=val_negative_path
)
train_dataset = DataSet(
positive_path=train_positive_path,
negative_path=train_negative_path
)
return distorted_inputs_unit(train_dataset, False), distorted_inputs_unit(val_dataset, False)
def main(_):
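    """Load heat-map images from predict_dir, normalise and resize them, then run the ResNet validation pass with dummy zero labels."""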
predict_dir = '/home/give/Documents/dataset/BOT_Game/train/positive-hm/method5'
file_names = os.listdir(predict_dir)
file_pathes = [os.path.join(predict_dir, file_name) for file_name in file_names]
image_values = [np.array(Image.open(file_path).convert('RGB')) for file_path in file_pathes]
image_values = np.asarray(image_values, np.float32)
image_values = image_values[:net_config.BATCH_SIZE]
new_image_values = []
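    # Normalise each image to [0, 1] and resize it channel-by-channel to the network input size.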
for index, image_value in enumerate(image_values):
image_value = np.asarray(image_value, np.float32)
image_value = image_value * (1.0 / np.max(image_value))
image_value = np.asarray(image_value, np.float32)
img = np.zeros([net_config.IMAGE_W, net_config.IMAGE_H, net_config.IMAGE_CHANNEL])
for j in range(net_config.IMAGE_CHANNEL):
img[:, :, j] = np.array(
Image.fromarray(image_value[:, :, j]).resize([net_config.IMAGE_W, net_config.IMAGE_H])
)
new_image_values.append(np.array(img))
image_values = np.array(new_image_values)
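    # Graph inputs: a batch of images and a batch of integer labels.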
image_tensor = tf.placeholder(
tf.float32,
[net_config.BATCH_SIZE, net_config.IMAGE_W, net_config.IMAGE_H, net_config.IMAGE_CHANNEL]
)
label_tensor = tf.placeholder(
tf.int32,
[net_config.BATCH_SIZE]
)
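    # Build the ResNet classification graph with the project's `inference` function (2 output classes).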
logits = inference(image_tensor,
num_classes=2,
is_training=True,
bottleneck=False,)
save_model_path = '/home/give/PycharmProjects/StomachCanner/classification/Net/ResNetHeatMap/models/method5-512'
print 'image_tensor is ', image_tensor
print np.shape(image_values)
val(image_tensor, logits, image_values, label_tensor, [0]*len(image_values), save_model_path=save_model_path)
if __name__ == '__main__':
tf.app.run()
| [
"[email protected]"
] | |
258c7e7730f7dcfc58404705362466c414aa2af4 | 8b865eca2facf190369df4303fd6550c31614f72 | /project04/bagInterface.py | 49ca757766fdb7a64aa4d786bbe30b5e0f3d4fe9 | [] | no_license | Yamase31/cs112 | 16ba1732441e70065f2aded7542907ccb35e048e | 199c5731b0bcbd475d8a8d2c9429eaebfbc1d180 | refs/heads/main | 2023-06-30T13:34:43.086674 | 2021-08-10T03:14:45 | 2021-08-10T03:14:45 | 394,503,671 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,340 | py | """
Author: James Lawson, Harry Pinkerton, Laurie Jones
File: bagInterface.py
Specifications of the methods for all bag classes. Running this code will
not produce any results, but it shows the headers and docstrings of the methods
that MUST be included or supported in any bag class.
"""
class BagInterface(object):
"""Interface for all bag types."""
# Constructor
def __init__(self, sourceCollection = None):
"""Sets the initial state of self, which includes the
contents of sourceCollection, if it's present."""
self._size = 0
self._modCount = 0
if sourceCollection:
for item in sourceCollection:
self.add(item)
# Accessor methods
    def isEmpty(self):
        """Returns True if len(self) == 0, or False otherwise."""
        return len(self) == 0
def count(self, target):
"""Returns the number of a specific items in self."""
""" Returns the number of instances of item in self"""
itemCount = 0
for nextItem in self:
if nextItem == target:
itemCount += 1
return itemCount
def __len__(self):
"""-Returns the number of items in self."""
return self._size
def __str__(self):
"""Returns the string representation of self."""
return "{" + ",".join(map(str, self)) + "}"
def __iter__(self):
"""Supports iteration over a view of self."""
return None
def __add__(self, other):
"""Returns a new bag containing the contents
of self and other."""
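        # Assumes a concrete ArrayBag implementation is available in the project; it is not defined in this module.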
result = ArrayBag(self)
for item in other:
result.add(item)
return result
def __eq__(self, other):
"""Returns True if self equals other,
or False otherwise."""
return False
# Mutator methods
def clear(self):
"""Makes self become empty."""
pass
def add(self, item):
"""Adds item to self."""
        pass
def remove(self, item):
"""Precondition: item is in self.
        Raises: KeyError if item is not in self.
Postcondition: item is removed from self."""
pass
| [
"[email protected]"
] | |
0681e4919822450a149df6a9ebf09f9bd101b37f | 2d837bca6989f61996e4e8e96635d722c97241c3 | /core/gtk_table.py | eaae443c2d7d162408c56f74d7190bb125d20de0 | [] | no_license | gsy/gmusic | 1485e11f4d63241f012b9e2ee27bbdb1ef563ce5 | 277e70c83a0ffcc00f2fc93933668dc16add11a8 | refs/heads/master | 2020-03-26T20:48:54.145376 | 2013-05-03T07:28:37 | 2013-05-03T07:28:37 | 9,724,424 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,189 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk
pygtk.require('2.0')
import gtk
class albumViewer:
def __init__(self):
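        """Build the main window: a gtk.Table holding 1.jpg and 2.jpg, each scaled to 200x200."""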
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.connect("destroy", lambda w:gtk.main_quit())
self.table = gtk.Table(1, 2, False)
self.table.set_row_spacings(10)
self.table.set_col_spacings(10)
image1 = gtk.Image()
image1.set_from_file("1.jpg")
self.table.attach(image1, 0, 1, 0, 1, gtk.FILL|gtk.EXPAND, gtk.FILL|gtk.EXPAND, 10, 10)
self.scaleImage(image1, "1.jpg")
image2 = gtk.Image()
image2.set_from_file("2.jpg")
self.table.attach(image2, 0, 1, 1, 2, gtk.FILL|gtk.EXPAND, gtk.FILL|gtk.EXPAND, 10, 10)
self.scaleImage(image2, "2.jpg")
self.window.add(self.table)
self.window.show_all()
def scaleImage(self, image, file):
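        """Load the image at `file` into a pixbuf, scale it to 200x200 with bilinear interpolation, and display it in `image`."""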
pixbuf = gtk.gdk.pixbuf_new_from_file(file)
scaled_buf = pixbuf.scale_simple(200, 200, gtk.gdk.INTERP_BILINEAR)
image.set_from_pixbuf(scaled_buf)
def main(self):
gtk.main()
if __name__ == '__main__':
albumViewer().main()
| [
"[email protected]"
] |