Dataset schema — one row per source file (column name, type, and observed range/values):

| Column | Type | Range / values |
|---|---|---|
| blob_id | stringlengths | 40 – 40 |
| directory_id | stringlengths | 40 – 40 |
| path | stringlengths | 3 – 616 |
| content_id | stringlengths | 40 – 40 |
| detected_licenses | sequencelengths | 0 – 112 |
| license_type | stringclasses | 2 values |
| repo_name | stringlengths | 5 – 115 |
| snapshot_id | stringlengths | 40 – 40 |
| revision_id | stringlengths | 40 – 40 |
| branch_name | stringclasses | 777 values |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 – 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 – 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 – 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k – 681M ⌀ |
| star_events_count | int64 | 0 – 209k |
| fork_events_count | int64 | 0 – 110k |
| gha_license_id | stringclasses | 22 values |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 – 2023-09-14 21:59:50 ⌀ |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 – 2023-08-21 12:35:19 ⌀ |
| gha_language | stringclasses | 149 values |
| src_encoding | stringclasses | 26 values |
| language | stringclasses | 1 value |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 – 10.2M |
| extension | stringclasses | 188 values |
| content | stringlengths | 3 – 10.2M |
| authors | sequencelengths | 1 – 1 |
| author_id | stringlengths | 1 – 132 |

(⌀ marks nullable columns.)
125eda5c2ea26724993805d2bdd9694df6fbe0fb | ab9eac7d27788b98bd3d43577bf11658fa6c67c5 | /src/clean_data.py | 2e7beaeb134717e44de0b902f242bee563130bad | [] | no_license | IkeyBenz/Instagram-Network-Graph | 1b0d5163b945a56ec024af77419bc03c3088bbac | 82ca93b94cb7b75b341683d4c20b489960c7378d | refs/heads/master | 2023-02-02T01:52:44.235220 | 2020-12-20T21:12:00 | 2020-12-20T21:12:00 | 321,414,811 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,355 | py

```python
from os import listdir, path
from util import get_data_dir, get_mutual_followship_path, get_user_connections_path, get_authenticated_username

data_dir = get_data_dir()
authenticated_username = get_authenticated_username()
connections_path = get_user_connections_path()


def get_users_connections():
    return set(open(connections_path).read().splitlines())


def correct_mutual_followers():
    # Keep only the stored mutuals that appear in the user's own connection list.
    for account in listdir(data_dir):
        mutuals_path = get_mutual_followship_path(account)
        if account == authenticated_username or not path.exists(mutuals_path):
            continue
        mutuals = set(open(mutuals_path).read().splitlines())
        corrected = mutuals.intersection(get_users_connections())
        with open(mutuals_path, 'w') as out:
            out.write("\n".join(corrected))


def check_mutual_correctness():
    # Report any stored mutual that is not in the user's connection list.
    for account in listdir(data_dir):
        mutuals_path = get_mutual_followship_path(account)
        if account == authenticated_username or not path.exists(mutuals_path):
            continue
        stored_mutuals = set(open(mutuals_path).read().splitlines())
        extras = stored_mutuals.difference(get_users_connections())
        if len(extras) > 0:
            print(account, "has extra mutuals:", extras)


if __name__ == '__main__':
    correct_mutual_followers()
    check_mutual_correctness()
```

authors: ["[email protected]"]
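The cleanup itself is a plain set intersection; a minimal self-contained sketch of the same step, with made-up usernames:

```python
# Illustration of the set-intersection cleanup above (sample data is made up).
connections = {"alice", "bob", "carol"}          # accounts actually connected to the user
stored_mutuals = {"alice", "carol", "mallory"}   # mutuals recorded on disk, possibly stale

corrected = stored_mutuals.intersection(connections)
print(sorted(corrected))  # ['alice', 'carol'] -- 'mallory' is dropped as an extra
```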
0afc429868366eb8eadd730a1566d020e31b6f46 | dbb32a7d5b96a94533b27a6ccf2474c660a863b7 | /containers/actor/sources/utils/__init__.py | 756cad2b8abb42638833a16139c9961fc42fd77d | [] | no_license | ankurhcu/FogBus2 | 772e8346c5e01e2aa8a02da9ef91fd696dd587a7 | 2cefabdd1d131fc8e9015ca31d414665e6014a69 | refs/heads/main | 2023-08-07T15:33:54.039724 | 2021-09-21T05:02:49 | 2021-09-21T05:02:49 | 410,610,212 | 1 | 0 | null | 2021-09-26T16:57:23 | 2021-09-26T16:57:22 | null | UTF-8 | Python | false | false | 1,823 | py

```python
from .component import BasicComponent
from .component import PeriodicTaskRunner
from .config import ConfigActor
from .config import ConfigMaster
from .config import ConfigRemoteLogger
from .config import ConfigTaskExecutor
from .config import ConfigUser
from .connection import BasicMessageHandler
from .connection import MessageReceived
from .connection import MessageReceiver
from .connection import MessageSender
from .connection import MessageToSend
from .container import ContainerManager
from .debugLogPrinter import DebugLogPrinter
from .resourceDiscovery import DiscoveredActors
from .resourceDiscovery import DiscoveredMasters
from .resourceDiscovery import DiscoveredRemoteLoggers
from .resourceDiscovery import ResourcesDiscovery
from .tools import camelToSnake
from .tools import decrypt
from .tools import encrypt
from .tools import filterIllegalCharacter
from .tools import newDebugLogger
from .tools import snakeToCamel
from .tools import terminate
from .types import ActorResources
from .types import Address
from .types import AutoDictionary
from .types import CannotBindAddr
from .types import Component
from .types import ComponentIdentity
from .types import ComponentRole
from .types import CPU
from .types import LoopSourceDestination
from .types import Memory
from .types import Message
from .types import MessageDoesNotContainSourceInfo
from .types import MessageDoesNotContainType
from .types import MessageSubSubType
from .types import MessageSubType
from .types import MessageType
from .types import PairsMedian
from .types import PeriodicTask
from .types import PeriodicTasks
from .types import ProcessingTime
from .types import Resources
from .types import SequenceMedian
from .types import SerializableDictionary
from .types import SynchronizedAttribute
```

authors: ["[email protected]"]
1a29ed7174a5e46688668e138299e976917f4743 | 34a9a91e6c3fbf427826d2cb2ad3d7c7a00ad0c0 | /collision_detection_program/SBI/beans/__init__.py | 87e717f04422002fd6fbaeabcf242107d76132 | ["MIT"] | permissive | structuralbioinformatics/SPServer | 015d7ede4b2c439c648b663b9af56a0ca98e277b | 946b7afdac16aef391ddd162daabfcc968eb9110 | refs/heads/master | 2021-04-23T14:02:10.935764 | 2020-07-24T09:00:19 | 2020-07-24T09:00:19 | 249,930,917 | 3 | 6 | null | null | null | null | UTF-8 | Python | false | false | 466 | py

```python
__all__ = [
    "Singleton",
    "Butler",
    "File",
    "FileError",
    "StorableObject",
    "Executable",
    "Path",
    "IndexedNum",
    "JSONer"
]

from .singleton import Singleton
from .butler import Butler
from .file import (File, FileError)
from .StorableObject import StorableObject
from .Executable import Executable
from .Path import Path
from .IndexedNum import IndexedNum
from .JSONer import JSONer
```

authors: ["[email protected]"]
090b01787d67ad38963fba38a99e8b1e8a557d7c | 15581a76b36eab6062e71d4e5641cdfaf768b697 | /LeetCode_30days_challenge/2021/June/Pascal's Triangle.py | d5f72948364fa07f9c70d38dfef7769ff10d9ebb | [] | no_license | MarianDanaila/Competitive-Programming | dd61298cc02ca3556ebc3394e8d635b57f58b4d2 | 3c5a662e931a5aa1934fba74b249bce65a5d75e2 | refs/heads/master | 2023-05-25T20:03:18.468713 | 2023-05-16T21:45:08 | 2023-05-16T21:45:08 | 254,296,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py

```python
from typing import List


class Solution:
    def generate(self, numRows: int) -> List[List[int]]:
        if numRows == 1:
            return [[1]]
        rows = [[1], [1, 1]]
        for i in range(2, numRows):
            row = [1]
            for j in range(1, i):
                row.append(rows[-1][j] + rows[-1][j - 1])
            row.append(1)
            rows.append(row)
        return rows
```

authors: ["[email protected]"]
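Each new entry is the sum of the two entries above it; a quick check of the first five rows using the class above:

```python
# Sanity check of Solution.generate above.
print(Solution().generate(5))
# [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]]
```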
c70c9bfee7433de27be912a8ac54969a41472e76 | 6a95112805b64322953429270a305d01fef3faea | /dist/weewx-4.0.0a9/bin/weewx/defaults.py | 98e51e336d2a837c2f8b4b3a9e2dac5943299e1d | ["GPL-1.0-or-later", "GPL-3.0-only", "Apache-2.0"] | permissive | tomdotorg/docker-weewx | c6d59dc492a9e53f3bc898f7b9f593717092d72c | 7085654f455d39b06acc688738fde27e1f78ad1e | refs/heads/main | 2023-06-08T17:57:44.184399 | 2023-01-30T11:21:23 | 2023-01-30T11:21:23 | 54,113,384 | 21 | 16 | Apache-2.0 | 2022-10-19T23:46:26 | 2016-03-17T11:39:29 | Dockerfile | UTF-8 | Python | false | false | 8,541 | py

```python
# coding: utf-8
#
# Copyright (c) 2019 Tom Keffer <[email protected]>
#
# See the file LICENSE.txt for your rights.
#
"""Backstop defaults used in the absence of any other values."""
from __future__ import absolute_import
from six.moves import StringIO
import configobj
default_str = u"""# Copyright (c) 2009-2019 Tom Keffer <[email protected]>
# See the file LICENSE.txt for your rights.
# Where the skins reside, relative to WEEWX_ROOT
SKIN_ROOT = skins
# Where the generated reports should go, relative to WEEWX_ROOT
HTML_ROOT = public_html
# The database binding indicates which data should be used in reports.
data_binding = wx_binding
# Whether to log a successful operation
log_success = True
# Whether to log an unsuccessful operation
log_failure = False
# The following section determines the selection and formatting of units.
[Units]
# The following section sets what unit to use for each unit group.
# NB: The unit is always in the singular. I.e., 'mile_per_hour',
# NOT 'miles_per_hour'
[[Groups]]
group_altitude = foot # Options are 'foot' or 'meter'
group_degree_day = degree_F_day # Options are 'degree_F_day' or 'degree_C_day'
group_direction = degree_compass
group_distance = mile # Options are 'mile' or 'km'
group_moisture = centibar
group_percent = percent
group_pressure = inHg # Options are 'inHg', 'mmHg', 'mbar', or 'hPa'
group_radiation = watt_per_meter_squared
group_rain = inch # Options are 'inch', 'cm', or 'mm'
group_rainrate = inch_per_hour # Options are 'inch_per_hour', 'cm_per_hour', or 'mm_per_hour'
group_speed = mile_per_hour # Options are 'mile_per_hour', 'km_per_hour', 'knot', or 'meter_per_second'
group_speed2 = mile_per_hour2 # Options are 'mile_per_hour2', 'km_per_hour2', 'knot2', or 'meter_per_second2'
group_temperature = degree_F # Options are 'degree_F' or 'degree_C'
group_uv = uv_index
group_volt = volt
# The following are used internally and should not be changed:
group_count = count
group_interval = minute
group_time = unix_epoch
group_elapsed = second
# The following section sets the formatting for each type of unit.
[[StringFormats]]
centibar = %.0f
cm = %.2f
cm_per_hour = %.2f
degree_C = %.1f
degree_F = %.1f
degree_compass = %.0f
foot = %.0f
hPa = %.1f
hour = %.1f
inHg = %.3f
inch = %.2f
inch_per_hour = %.2f
km = %.1f
km_per_hour = %.0f
km_per_hour2 = %.1f
knot = %.0f
knot2 = %.1f
mbar = %.1f
meter = %.0f
meter_per_second = %.1f
meter_per_second2 = %.1f
mile = %.1f
mile_per_hour = %.0f
mile_per_hour2 = %.1f
mm = %.1f
mmHg = %.1f
mm_per_hour = %.1f
percent = %.0f
second = %.0f
uv_index = %.1f
volt = %.1f
watt_per_meter_squared = %.0f
NONE = " N/A"
# The following section sets the label to be used for each type of unit
[[Labels]]
centibar = " cb"
cm = " cm"
cm_per_hour = " cm/hr"
degree_C = °C
degree_F = °F
degree_compass = °
foot = " feet"
hPa = " hPa"
inHg = " inHg"
inch = " in"
inch_per_hour = " in/hr"
km = " km", " kms"
km_per_hour = " km/h"
km_per_hour2 = " km/h"
knot = " knots"
knot2 = " knots"
mbar = " mbar"
meter = " meters"
meter_per_second = " m/s"
meter_per_second2 = " m/s"
mile = " mile", " miles"
mile_per_hour = " mph"
mile_per_hour2 = " mph"
mm = " mm"
mmHg = " mmHg"
mm_per_hour = " mm/hr"
percent = %
volt = " V"
watt_per_meter_squared = " W/m²"
day = " day", " days"
hour = " hour", " hours"
minute = " minute", " minutes"
second = " second", " seconds"
NONE = ""
# The following section sets the format to be used for each time scale.
# The values below will work in every locale, but they may not look
# particularly attractive. See the Customization Guide for alternatives.
[[TimeFormats]]
hour = %H:%M
day = %X
week = %X (%A)
month = %x %X
year = %x %X
rainyear = %x %X
current = %x %X
ephem_day = %X
ephem_year = %x %X
[[Ordinates]]
# Ordinal directions. The last one should be for no wind direction
directions = N, NNE, NE, ENE, E, ESE, SE, SSE, S, SSW, SW, WSW, W, WNW, NW, NNW, N/A
# The following section sets the base temperatures used for the
# calculation of heating and cooling degree-days.
[[[DegreeDays]]]
# Base temperature for heating days, with unit:
heating_base = 65, degree_F
# Base temperature for cooling days, with unit:
cooling_base = 65, degree_F
# Base temperature for growing days, with unit:
growing_base = 50, degree_F
# A trend takes a difference across a time period. The following
# section sets the time period, and how big an error is allowed to
# still be counted as the start or end of a period.
[[[Trend]]]
time_delta = 10800 # 3 hours
time_grace = 300 # 5 minutes
# The labels are applied to observations or any other strings.
[Labels]
# Set to hemisphere abbreviations suitable for your location:
hemispheres = N, S, E, W
# Formats to be used for latitude whole degrees, longitude whole
# degrees, and minutes:
latlon_formats = "%02d", "%03d", "%05.2f"
# Generic labels, keyed by an observation type.
[[Generic]]
barometer = Barometer
dewpoint = Dew Point
ET = ET
heatindex = Heat Index
inHumidity = Inside Humidity
inTemp = Inside Temperature
outHumidity = Humidity
outTemp = Outside Temperature
radiation = Radiation
rain = Rain
rainRate = Rain Rate
UV = UV Index
windDir = Wind Direction
windGust = Gust Speed
windGustDir = Gust Direction
windSpeed = Wind Speed
windchill = Wind Chill
windgustvec = Gust Vector
windvec = Wind Vector
windrun = Wind Run
extraTemp1 = Temperature1
extraTemp2 = Temperature2
extraTemp3 = Temperature3
# Sensor status indicators
rxCheckPercent = Signal Quality
txBatteryStatus = Transmitter Battery
windBatteryStatus = Wind Battery
rainBatteryStatus = Rain Battery
outTempBatteryStatus = Outside Temperature Battery
inTempBatteryStatus = Inside Temperature Battery
consBatteryVoltage = Console Battery
heatingVoltage = Heating Battery
supplyVoltage = Supply Voltage
referenceVoltage = Reference Voltage
[Almanac]
# The labels to be used for the phases of the moon:
moon_phases = New, Waxing crescent, First quarter, Waxing gibbous, Full, Waning gibbous, Last quarter, Waning crescent
"""
# Even though default_str is in Unicode, specify an encoding in
# case someone wants to write the ConfigObj out.
defaults = configobj.ConfigObj(StringIO(default_str), encoding='utf-8', default_encoding='utf-8')
```

authors: ["[email protected]"]
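`defaults` is an ordinary `ConfigObj`, so the parsed tree reads like nested dictionaries; a minimal sketch, assuming the `weewx` package is importable:

```python
# Minimal sketch: reading values from the parsed defaults (assumes weewx is on sys.path).
from weewx.defaults import defaults

print(defaults['Units']['Groups']['group_temperature'])  # degree_F
print(defaults['Units']['Labels']['degree_C'])           # °C
print(defaults['Labels']['Generic']['outTemp'])          # Outside Temperature
```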
d9450370110654bbba361d0adb0ff18def6f3bf6 | 52f0984561895b48f3e6e40658a6e52c97705715 | /python-folder/year-grade.py | 5b6647ed5326a8d753ec1092b8476883e8bf511b | [] | no_license | jsanon01/python | 8da2755e7724850875518455c1760bb9f04dd873 | edd52214e3578f18b71b0ad944c287411fb23dfb | refs/heads/master | 2022-05-20T00:29:10.550169 | 2022-05-10T01:08:48 | 2022-05-10T01:08:48 | 165,682,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,245 | py

```python
# This script prints a while-loop with if-elif statement
year = " "
while year != 'q':
    year = input('Enter a grade from 0 - 13 or q to quit: ')
    if year.isdigit():
        year = int(year)
        if year == 0:
            print('You are in Pre-School')
        elif year == 1:
            print('You are in Kindergarten')
        elif year == 2:
            print('You are in 1st Grade')
        elif year == 3:
            print('You are in 2nd Grade')
        elif year == 4:
            print('You are in 3rd Grade')
        elif year == 5:
            print('You are in 4th Grade')
        elif year == 6:
            print('You are in 5th Grade')
        elif year == 7:
            print('You are in 6th Grade')
        elif year == 8:
            print('You are in 7th Grade')
        elif year == 9:
            print('You are in 8th Grade')
        elif year == 10:
            print('You are in 9th Grade or Freshman')
        elif year == 11:
            print('You are in 10th Grade or Sophomore')
        elif year == 12:
            print('You are in 11th Grade or Junior')
        elif year == 13:
            print('You are in 12th Grade or Senior')
        else:
            print('You entered an invalid entry')
```

authors: ["[email protected]"]
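The ladder maps one number to one string, so a lookup table plus a small rule for the regular grades is an equivalent, shorter design; a sketch (all names here are my own):

```python
# Equivalent lookup-table version of the if/elif ladder above (illustrative sketch).
SPECIAL = {0: 'Pre-School', 1: 'Kindergarten', 10: '9th Grade or Freshman',
           11: '10th Grade or Sophomore', 12: '11th Grade or Junior',
           13: '12th Grade or Senior'}
SUFFIX = {1: 'st', 2: 'nd', 3: 'rd'}

def grade_name(year):
    if year in SPECIAL:
        return SPECIAL[year]
    if 2 <= year <= 9:                       # 2 -> 1st Grade, ..., 9 -> 8th Grade
        n = year - 1
        return f'{n}{SUFFIX.get(n, "th")} Grade'
    return None                              # invalid entry

print(grade_name(4))   # 3rd Grade
print(grade_name(13))  # 12th Grade or Senior
```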
005fc965039152d62022c24120d51fc81fda661b | 4bde2d1e2282014f71b8cfec4440cb062db172cb | /euler_021.py | 1bbf96ecb93e58589edeffbbaf5d3fcf9c7699a2 | [] | no_license | MrDeshaies/NOT-projecteuler.net | 6b107a515b1322fcd5f7d88e187ca2ea97edddcf | c6f0bd38d074b427345b4f5b41733bda38fbcdb4 | refs/heads/master | 2022-11-17T18:39:43.321814 | 2022-11-13T11:35:10 | 2022-11-13T11:35:10 | 201,793,983 | 0 | 0 | null | 2019-08-18T19:50:45 | 2019-08-11T17:20:18 | Python | UTF-8 | Python | false | false | 923 | py

```python
from euler import *
import math

# Let d(n) be defined as the sum of proper divisors of n (numbers less than n which divide evenly into n).
# If d(a) = b and d(b) = a, where a ≠ b, then a and b are an amicable pair and each of a and b are called
# amicable numbers.
#
# For example, the proper divisors of 220 are 1, 2, 4, 5, 10, 11, 20, 22, 44, 55 and 110;
# therefore d(220) = 284. The proper divisors of 284 are 1, 2, 4, 71 and 142; so d(284) = 220.
#
# Evaluate the sum of all the amicable numbers under 10000.


def sumDivisors(x):
    return sum(factLessItself(x))


amicableSet = set()
# skip 1, since d(1) = 1, so it does not satisfy a != b
for x in range(2, 10000):
    if x in amicableSet:
        continue
    d = sumDivisors(x)
    y = sumDivisors(d)
    if d != x and y == x:
        print(str(x) + " and " + str(d) + " are best buds.")
        amicableSet.update([x, d])
print(amicableSet)
print(sum(amicableSet))
```

authors: ["[email protected]"]
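`factLessItself` comes from the author's private `euler` module; a self-contained stand-in for `sumDivisors`, checked against the 220/284 example in the comments:

```python
# Stand-in for euler.factLessItself / sumDivisors (my own implementation, n >= 2).
def sum_divisors(n):
    total = 1  # 1 divides everything
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            total += i
            if i != n // i:      # avoid counting a square root twice
                total += n // i
    return total

print(sum_divisors(220), sum_divisors(284))  # 284 220
```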
055c26bc9905e675638f8bf8b9191eb93fadf19d | 44869749f8af2b548a2fbb23403e1a623e29d691 | /myvenv/Scripts/django-admin.py | 99ad1c3626d471588a35651a3339f4396eba88e2 | [] | no_license | Ojou/my-first-blog | 4536c4db194d325508fd000ccd5919a722772994 | e29be78c3c87b39c474dabf2a27387797c2d2a41 | refs/heads/master | 2016-08-12T15:27:52.761420 | 2016-03-12T05:06:06 | 2016-03-12T05:06:06 | 53,712,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 160 | py

```python
#!C:\Users\tova\djangogirls\myvenv\Scripts\python.exe
from django.core import management

if __name__ == "__main__":
    management.execute_from_command_line()
```

authors: ["[email protected]"]
206043e6d4c95bbf4afa57ff9b6d0fa29d8d4d3d | bc441bb06b8948288f110af63feda4e798f30225 | /resource_monitor_sdk/model/resource_manage/filter_strategy_instance_data_pb2.py | 754fef25b2c606e53ea2f232404bda2034096d3d | ["Apache-2.0"] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,615 | py

```python
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_strategy_instance_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_monitor_sdk.model.resource_manage import filter_condition_group_pb2 as resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_strategy_instance_data.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n#filter_strategy_instance_data.proto\x12\x0fresource_manage\x1aGresource_monitor_sdk/model/resource_manage/filter_condition_group.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xa2\x01\n\x1a\x46ilterStrategyInstanceData\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1a\n\x12strategyInstanceId\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x35\n\x06\x66ilter\x18\x04 \x03(\x0b\x32%.resource_manage.FilterConditionGroupBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_FILTERSTRATEGYINSTANCEDATA = _descriptor.Descriptor(
name='FilterStrategyInstanceData',
full_name='resource_manage.FilterStrategyInstanceData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='resource_manage.FilterStrategyInstanceData.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strategyInstanceId', full_name='resource_manage.FilterStrategyInstanceData.strategyInstanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='resource_manage.FilterStrategyInstanceData.data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='resource_manage.FilterStrategyInstanceData.filter', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=160,
serialized_end=322,
)
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['filter'].message_type = resource__monitor__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2._FILTERCONDITIONGROUP
DESCRIPTOR.message_types_by_name['FilterStrategyInstanceData'] = _FILTERSTRATEGYINSTANCEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterStrategyInstanceData = _reflection.GeneratedProtocolMessageType('FilterStrategyInstanceData', (_message.Message,), {
'DESCRIPTOR' : _FILTERSTRATEGYINSTANCEDATA,
'__module__' : 'filter_strategy_instance_data_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterStrategyInstanceData)
})
_sym_db.RegisterMessage(FilterStrategyInstanceData)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
```

authors: ["[email protected]"]
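The generated module is consumed like any protobuf binding; a minimal usage sketch (field values are made up, and the import path follows this row's file location):

```python
# Minimal sketch using the generated message (values are made up).
from resource_monitor_sdk.model.resource_manage import filter_strategy_instance_data_pb2 as pb

msg = pb.FilterStrategyInstanceData()
msg.id = "instance-001"
msg.strategyInstanceId = "strategy-42"
msg.data["threshold"] = 0.8          # the data field is a google.protobuf.Struct, dict-like

blob = msg.SerializeToString()       # wire-format bytes
print(pb.FilterStrategyInstanceData.FromString(blob).strategyInstanceId)  # strategy-42
```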
52cc436d976d9ead1d13b314196b6be9d9d8fc4c | c29eba01ce299ebb27b886a83e19e59add7e2f6b | /tests/pytest_extension/fixtures/test_issue_github_54.py | 34ceadfa56fc64602d0e04f8a54879098f489c44 | ["BSD-3-Clause"] | permissive | smarie/python-pytest-cases | e87516e73d5067d5c307c7fdb37cc5f1f97c417e | ab3b7190d728b18512141b9f5f3a1c3dfc7cedf2 | refs/heads/main | 2023-07-08T11:41:57.278697 | 2023-02-23T13:11:25 | 2023-02-23T13:11:25 | 138,296,136 | 286 | 40 | BSD-3-Clause | 2023-07-03T14:57:02 | 2018-06-22T11:42:19 | Python | UTF-8 | Python | false | false | 600 | py

```python
# Authors: Sylvain MARIE <[email protected]>
#          + All contributors to <https://github.com/smarie/python-pytest-cases>
#
# License: 3-clause BSD, <https://github.com/smarie/python-pytest-cases/blob/master/LICENSE>
import pytest

from pytest_cases.fixture_core1_unions import InvalidParamsList
from pytest_cases import parametrize, fixture_ref


@pytest.fixture
def test():
    return ['a', 'b', 'c']


def test_invalid_argvalues():
    with pytest.raises(InvalidParamsList):
        @parametrize('main_msg', fixture_ref(test))
        def test_prints(main_msg):
            print(main_msg)
```

authors: ["[email protected]"]
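The test pins down that a bare `fixture_ref` is rejected as `argvalues`; the accepted form wraps it in a list, e.g.:

```python
# Accepted form: argvalues is a list, with fixture_ref as an element.
# ('test' refers to the fixture defined above.)
from pytest_cases import parametrize, fixture_ref

@parametrize('main_msg', [fixture_ref(test), 'plain value'])
def test_prints_ok(main_msg):
    print(main_msg)
```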
a3e9a18765fad1e19b88ac4df2ef46b6ddef4d9b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/contrib/cv/detection/SOLOv1/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py | eba5902c5acd2a9c3bbb92f63de00ac450eb4f6b | ["LicenseRef-scancode-proprietary-license", "BSD-2-Clause", "Apache-2.0", "MIT", "BSD-3-Clause", "LicenseRef-scancode-generic-cla", "LicenseRef-scancode-unknown-license-reference", "GPL-1.0-or-later"] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 6,718 | py

```python
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the BSD 3-Clause License (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# model settings
model = dict(
type='FasterRCNN',
pretrained='open-mmlab://resnet50_caffe',
backbone=dict(
type='ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='caffe'),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=5),
rpn_head=dict(
type='GARPNHead',
in_channels=256,
feat_channels=256,
octave_base_scale=8,
scales_per_octave=3,
octave_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32, 64],
anchor_base_sizes=None,
anchoring_means=[.0, .0, .0, .0],
anchoring_stds=[0.07, 0.07, 0.14, 0.14],
target_means=(.0, .0, .0, .0),
target_stds=[0.07, 0.07, 0.11, 0.11],
loc_filter_thr=0.01,
loss_loc=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=81,
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1],
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)))
# model training and testing settings
train_cfg = dict(
rpn=dict(
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
center_ratio=0.2,
ignore_ratio=0.5,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=300,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=1e-3, nms=dict(type='nms', iou_thr=0.5), max_per_img=100))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[8, 11])
checkpoint_config = dict(interval=1)
# yapf:disable
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
# yapf:enable
# runtime settings
total_epochs = 12
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/ga_faster_rcnn_r50_caffe_fpn_1x'
load_from = None
resume_from = None
workflow = [('train', 1)]
```

authors: ["[email protected]"]
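This follows the mmdetection config convention, so it can be loaded and inspected with `mmcv`; a sketch assuming an mmcv 1.x environment and a repo-relative path:

```python
# Sketch: loading the config with mmcv 1.x (path is repo-relative and illustrative).
from mmcv import Config

cfg = Config.fromfile('configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x.py')
print(cfg.model.type)      # FasterRCNN
print(cfg.total_epochs)    # 12
print(cfg.lr_config.step)  # [8, 11]
```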
861755d3c8cbf83029189ac9a98f4896f67dafad | b0110e27e3162e2092259dd299481de1dafb4ea8 | /parallel/p7red.test.key.py | 44867a6a942964cfe29d902c74675fd2ad65708f | ["MIT"] | permissive | mobarski/sandbox | f9be203bf7015f6df70badd605a40172b63a90f8 | f9054fb3252488208e503a87efba5df74fc70538 | refs/heads/master | 2023-05-29T14:51:00.125028 | 2023-05-14T21:02:38 | 2023-05-14T21:02:38 | 86,854,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py

```python
from __future__ import print_function
import sys

# Reduce step: sums the value column per key; expects "key value" lines
# on stdin, already sorted by key (as after the sort in a map/sort/reduce pipe).
lines = sys.stdin.readlines()
rows = [str.partition(x, ' ') for x in lines if x.strip()]

key_sum = 0
key = rows[0][0]
for k, _, x in rows:
    if k != key:
        print(key, key_sum)
        key_sum = 0
        key = k
    key_sum += int(x)
print(key, key_sum)
```

authors: ["[email protected]"]
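The same grouping logic as a self-contained function, run on made-up key-sorted input:

```python
# Self-contained version of the reducer above (sample input is made up).
def reduce_sums(lines):
    rows = [line.partition(' ') for line in lines if line.strip()]
    out, key, key_sum = [], rows[0][0], 0
    for k, _, x in rows:
        if k != key:
            out.append((key, key_sum))
            key_sum, key = 0, k
        key_sum += int(x)
    out.append((key, key_sum))
    return out

print(reduce_sums(["a 1", "a 2", "b 5", "b 7"]))  # [('a', 3), ('b', 12)]
```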
544f532144174157f2267fe49d08336f13de9d1e | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/前端/juejin_2003.py | 68c317bc66f0071a6e258987ac4befff8d3dcb57 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67,318 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6986465633114259469", "article_info": {"article_id": "6986465633114259469", "user_id": "1996368846261294", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640369764958215, 6809640653266354190], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/d1f5ea8a6b3041879c44e13109d3c34b~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "uni-app开发微信小程序和h5应用", "brief_content": "最近,有个需求需要开发H5应用和微信小程序。如果针对不同的平台开发自己的一套代码,那将是一件很糟糕的事情:如果下次需要兼容支付宝小程序、快应用,那工作量随着平台的添加而快速增加。所以选择uni-app", "is_english": 0, "is_original": 1, "user_index": 11.204596981717515, "original_type": 0, "original_author": "", "content": "", "ctime": "1626663399", "mtime": "1626663516", "rtime": "1626663516", "draft_id": "6984667078162645029", "view_count": 992, "collect_count": 11, "digg_count": 10, "comment_count": 1, "hot_index": 60, "is_hot": 0, "rank_index": 0.00580811, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1996368846261294", "user_name": "Jimmy", "company": "Foreign Enterprise", "job_title": "Canton", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/67854670252c7aa1d747ae166576a645~300x300.image", "level": 4, "description": "https://www.jimmyarea.com", "followee_count": 1, "follower_count": 2247, "post_article_count": 100, "digg_article_count": 227, "got_digg_count": 6048, "got_view_count": 331277, "post_shortmsg_count": 18, "digg_shortmsg_count": 59, "isfollowed": false, "favorable_author": 1, "power": 9306, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", 
"post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546704, "tag_id": "6809640653266354190", "tag_name": "微信小程序", "color": "#11a600", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/a1e7773920f51db40441.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1474932627, "mtime": 1631692796, "id_type": 9, "tag_alias": "", "post_article_count": 7107, "concern_user_count": 221757}], "user_interact": {"id": 6986465633114259469, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6976054641746247688", "article_info": {"article_id": "6976054641746247688", "user_id": "4054654615555854", "category_id": "6809637767543259144", "tag_ids": [6809640625856577549, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "浏览器知识点整理(八)DOM 和 JS、CSS 不得不说的故事", "brief_content": "这篇文章带你了解 DOM、JS、CSS 三者的爱恨情长:DOM树是怎么生成的?解析 HTML 时遇到了 JS 会怎么样?JS 遇到了 CSS 又会怎么样?为什么要把 CSS 放头部和把 JS 放尾部", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1624239319", "mtime": "1626495834", "rtime": "1624247493", "draft_id": "6973678917895454756", "view_count": 945, "collect_count": 14, "digg_count": 51, "comment_count": 20, "hot_index": 118, "is_hot": 0, "rank_index": 0.00580486, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4054654615555854", "user_name": "起风了Q", "company": "kingsoft", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/288ec7eadb3dfe0f8b55047f2ee52574~300x300.image", "level": 3, "description": "你相信什么,就会遇见什么", "followee_count": 76, "follower_count": 305, "post_article_count": 73, "digg_article_count": 1528, "got_digg_count": 2076, "got_view_count": 47830, "post_shortmsg_count": 1, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 2554, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546683, "tag_id": "6809640625856577549", "tag_name": "浏览器", "color": "#47ebc7", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/baf3558e2acdfa623201.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1460153459, "mtime": 1631677186, "id_type": 9, "tag_alias": "", "post_article_count": 3341, "concern_user_count": 28324}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 
88828, "concern_user_count": 527704}], "user_interact": {"id": 6976054641746247688, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7001522790784303112", "article_info": {"article_id": "7001522790784303112", "user_id": "2401755217788935", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "时间对象及时间戳的运用(时针、倒计时)", "brief_content": "时间对象 时间的获取 时间的操作 及特别需要注意的两点 时间的操作实现时针的写法 时间戳 实现 倒计时的写法注意事项", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1630169322", "mtime": "1630304771", "rtime": "1630304771", "draft_id": "7001506304221003812", "view_count": 196, "collect_count": 0, "digg_count": 4, "comment_count": 0, "hot_index": 13, "is_hot": 0, "rank_index": 0.00580381, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2401755217788935", "user_name": "sunShine", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3793/3114521287~300x300.image", "level": 1, "description": "", "followee_count": 6, "follower_count": 1, "post_article_count": 36, "digg_article_count": 11, "got_digg_count": 35, "got_view_count": 1305, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 48, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7001522790784303112, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6995882287539683358", "article_info": {"article_id": "6995882287539683358", "user_id": "457021165420152", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 
6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/24965ec109d94db99d11661209725441~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "JavaScrip入门指南之“Web API、DOM”(笔记十)", "brief_content": "Web API介绍 API 的概念 API(Application Programming Interface,应用程序编程接口)是一些预先定义的函数或方法,目的是提供应用程序与开发人员基于某软件或硬", "is_english": 0, "is_original": 1, "user_index": 7.630034299002856, "original_type": 0, "original_author": "", "content": "", "ctime": "1628855942", "mtime": "1629101909", "rtime": "1629101909", "draft_id": "6993515250540642340", "view_count": 187, "collect_count": 3, "digg_count": 11, "comment_count": 2, "hot_index": 22, "is_hot": 0, "rank_index": 0.00580334, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "457021165420152", "user_name": "Grit_1024", "company": "", "job_title": "前端开发工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/9bed61fa13462d9d3cb55946bb9cbad8~300x300.image", "level": 2, "description": "温故而知新", "followee_count": 130, "follower_count": 37, "post_article_count": 31, "digg_article_count": 1041, "got_digg_count": 489, "got_view_count": 5651, "post_shortmsg_count": 1, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 545, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6995882287539683358, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7002419362430713893", "article_info": {"article_id": "7002419362430713893", "user_id": "3737995266239848", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/71c1b56fe6a145818dd81bcd0639f7d8~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "湖中剑 前端周刊 #6 | 2021-08-30", "brief_content": 
"周刊收集包括前端(但不限于前端)的文章、新闻、开源项目、工具等等,每周一更新。 📰 News TypeScript 发布4.4版本 主要变更: 提供针对 Aliased Conditions 的控制流", "is_english": 0, "is_original": 1, "user_index": 6.331357980475461, "original_type": 0, "original_author": "", "content": "", "ctime": "1630377827", "mtime": "1630386850", "rtime": "1630386850", "draft_id": "7002418597129617444", "view_count": 83, "collect_count": 1, "digg_count": 2, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00579866, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3737995266239848", "user_name": "ineo6", "company": "", "job_title": "B站打工人", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/1698faa8df39380d260~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 12, "follower_count": 42, "post_article_count": 42, "digg_article_count": 2, "got_digg_count": 133, "got_view_count": 36680, "post_shortmsg_count": 6, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 500, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7002419362430713893, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6994563519047794701", "article_info": {"article_id": "6994563519047794701", "user_id": "3747558609661213", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "🚀详解JavaScript系列之数组(十)终结篇", "brief_content": "这是我参与8月更文挑战的第10天,活动详情查看:8月更文挑战 前言 reduce() 作用: reduce() 方法接收一个函数作为累加器,数组中的每个值(从左到右)开始缩减,最终计算为一个值。返回值", "is_english": 0, "is_original": 1, "user_index": 6.870045844634849, "original_type": 0, "original_author": "", "content": "", "ctime": "1628549434", "mtime": "1628567307", "rtime": "1628567307", "draft_id": "6994444076149899277", "view_count": 154, "collect_count": 1, "digg_count": 24, 
"comment_count": 0, "hot_index": 31, "is_hot": 0, "rank_index": 0.00579776, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3747558609661213", "user_name": "小只前端攻城狮", "company": "滴滴 | 前端研发工程师", "job_title": "公众号:小攻城狮学前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/4533378875c883604f33d3a3a2e9de5c~300x300.image", "level": 3, "description": "Web全栈开发、持续学习者,关注公众号第一时间接收最新文章,也经常分享一些好用的工具", "followee_count": 40, "follower_count": 114, "post_article_count": 92, "digg_article_count": 323, "got_digg_count": 1575, "got_view_count": 28793, "post_shortmsg_count": 5, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 1862, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 6994563519047794701, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6967972435064782879", "article_info": {"article_id": "6967972435064782879", "user_id": "2049145407280557", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "GitLab+Docker快速搭建CI/CD自动化部署", "brief_content": "什么是持续集成(Continuous integration)? 
CI 在持续集成环境中,开发人员将会频繁得提交代码到主干。这些新提交在最终合并到主线之前,都需要通过编译和自动化测试进行验证。这样做是", "is_english": 0, "is_original": 1, "user_index": 3.842857466371409, "original_type": 0, "original_author": "", "content": "", "ctime": "1622357531", "mtime": "1622359785", "rtime": "1622359785", "draft_id": "6967660902879330340", "view_count": 2228, "collect_count": 70, "digg_count": 43, "comment_count": 1, "hot_index": 155, "is_hot": 0, "rank_index": 0.00579307, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2049145407280557", "user_name": "贪吃的猫", "company": "", "job_title": "前端开发", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/944711d0171517fd54809d133bde5907~300x300.image", "level": 2, "description": "每天坚持学习,学习要有输出。有计划有目标进行。坚持就是胜利!", "followee_count": 13, "follower_count": 6, "post_article_count": 10, "digg_article_count": 11, "got_digg_count": 77, "got_view_count": 4273, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 119, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6967972435064782879, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6997032386462482469", "article_info": {"article_id": "6997032386462482469", "user_id": "3368559357218382", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640357354012685], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/81caa82f299d41f0a27e21fef79586b4~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "浅谈前端状态管理的进化", "brief_content": "如何重新思考前端状态设计?还有,为什么必须要凑够50字?是考虑对排版友好吗哈哈哈哈哈哈哈哈哈哈哈哈哈", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629123715", "mtime": "1629167911", "rtime": "1629167911", "draft_id": "6997032014914256926", "view_count": 435, "collect_count": 2, "digg_count": 8, "comment_count": 0, "hot_index": 29, "is_hot": 0, "rank_index": 0.00579504, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3368559357218382", "user_name": "王圣松", "company": "Gitee", "job_title": "前端开发工程师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/935bb913da4e37a6c7d5071dcb0d8c88~300x300.image", 
"level": 3, "description": "前端扫地机器人", "followee_count": 159, "follower_count": 946, "post_article_count": 23, "digg_article_count": 119, "got_digg_count": 1569, "got_view_count": 92964, "post_shortmsg_count": 17, "digg_shortmsg_count": 41, "isfollowed": false, "favorable_author": 0, "power": 2497, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}], "user_interact": {"id": 6997032386462482469, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6997052050903138334", "article_info": {"article_id": "6997052050903138334", "user_id": "1451011082030302", "category_id": "6809637767543259144", "tag_ids": [6809640357354012685, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/79519e453c4e41d5aebe6f5fcf1d9679~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "用 Next.js 做服务端渲染, React 农民工快速搬砖必学", "brief_content": "在新兴性前端农民工日益增加的时代,为了更好的搬砖,`next` 成了 `React` 项目在做服务端渲染不得不学的一个框架,前端为什么要做服务端渲染,一搜索引擎 `SEO` 优化,二 减少首屏渲染时间", "is_english": 0, "is_original": 1, "user_index": 6.508716229440013, "original_type": 0, "original_author": "", "content": "", "ctime": "1629128262", "mtime": "1629183287", "rtime": "1629183287", "draft_id": "6997047428943708197", "view_count": 308, "collect_count": 4, "digg_count": 5, "comment_count": 2, "hot_index": 22, "is_hot": 0, "rank_index": 0.00579455, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1451011082030302", "user_name": "我说姑娘", "company": "杭州某互联网", "job_title": "前端萌新", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/36f734693e4fef4cc32a2e7e08c66d72~300x300.image", "level": 1, "description": "", "followee_count": 17, "follower_count": 18, "post_article_count": 4, "digg_article_count": 79, "got_digg_count": 30, "got_view_count": 2442, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 
54, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6997052050903138334, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7003267384358207501", "article_info": {"article_id": "7003267384358207501", "user_id": "641770522946254", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "原型 原型链", "brief_content": "js分为函数对象和普通对象,每个对象都有__proto__属性,但是只有函数对象才有prototype属性 Object、Function都是js内置的函数, 类似的还有我们常用到的Array、Reg", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1630575301", "mtime": "1630651717", "rtime": "1630575849", "draft_id": "7003239518023204877", "view_count": 204, "collect_count": 1, "digg_count": 0, "comment_count": 0, "hot_index": 10, "is_hot": 0, "rank_index": 0.00579413, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "641770522946254", "user_name": "Lisanmu", "company": "", "job_title": "前端开发", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/9410632d2470acf51c3a2f3e7b67e3d1~300x300.image", "level": 1, "description": "", "followee_count": 39, "follower_count": 1, "post_article_count": 11, "digg_article_count": 0, "got_digg_count": 2, "got_view_count": 577, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 7, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7003267384358207501, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7002975286539075592", "article_info": {"article_id": "7002975286539075592", "user_id": "1654113622572472", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "JavaScript中this指向问题", "brief_content": "无论是工作或者面试中,this指向问题是经常遇到的。所以这篇文章把常见的指向问题列出来给大家,避免踩坑。首先我们要知道,在函数中this到底取何值,是在函数真正被调用执行的时候确定的,函数定义的时候确", "is_english": 0, "is_original": 1, "user_index": 3.969362295916118, "original_type": 0, "original_author": "", "content": "", "ctime": "1630507245", "mtime": "1630554178", "rtime": "1630554178", "draft_id": "7002967243252301860", "view_count": 64, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00579235, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1654113622572472", "user_name": "芒果炒香菜", "company": "", "job_title": "", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/b0ad8ff1d5e6f8490b1d6d710bd0ed89~300x300.image", "level": 2, "description": "虽无圣贤之心,益慕圣贤之道", "followee_count": 12, "follower_count": 11, "post_article_count": 60, "digg_article_count": 197, "got_digg_count": 252, "got_view_count": 4961, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 301, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 7002975286539075592, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7002036441601540109", "article_info": {"article_id": "7002036441601540109", "user_id": "2911162520331037", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "「轻松科普」浏览器中的 JavaScript", "brief_content": "学习编程知识,能不能生动有趣,容易理解呢?我还在探索尝试的过程中,这篇文章,将从不一样的角度切入,让大家对浏览器中的 JavaScript 有了一些了解,希望大家看完之后,都有不一样的收获。", "is_english": 0, "is_original": 1, "user_index": 8.366364165700077, "original_type": 0, "original_author": "", "content": "", "ctime": "1630288673", "mtime": "1630293175", "rtime": "1630293175", "draft_id": "6999623926242344990", "view_count": 53, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00578797, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2911162520331037", "user_name": "追梦玩家", "company": "广州", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/6cef914c7bbca2a112552b013ebf61f5~300x300.image", "level": 2, "description": "相信自己有能力,那么你就真的会有!", "followee_count": 5, "follower_count": 117, "post_article_count": 36, "digg_article_count": 100, "got_digg_count": 541, "got_view_count": 44842, "post_shortmsg_count": 22, "digg_shortmsg_count": 20, "isfollowed": false, "favorable_author": 0, "power": 989, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}], "user_interact": {"id": 7002036441601540109, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, 
"is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6993951401952935972", "article_info": {"article_id": "6993951401952935972", "user_id": "272334614432887", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640396788858887], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/f8ad5b4457c74d45b03ba76ec029b11b~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "玩转 Docker 部署", "brief_content": "前言 相信很多人都很头疼 Docker 的部署,我自己也是。 最近发现一个很有意思的现象:一个人想学某样技术的时候,当学会了之后,但是这时出现了一个问题需要学习另一门技术时,无论这个人前面学得多么刻苦", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628406256", "mtime": "1628492105", "rtime": "1628492105", "draft_id": "6993878290234605604", "view_count": 494, "collect_count": 7, "digg_count": 12, "comment_count": 3, "hot_index": 39, "is_hot": 0, "rank_index": 0.0057874, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "272334614432887", "user_name": "写代码的海怪", "company": "公众号 | 写代码的海怪 | 腾讯", "job_title": "", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/fd56914f48c601eb135015f35b94bece~300x300.image", "level": 2, "description": "聊聊技术和分享生活。", "followee_count": 18, "follower_count": 154, "post_article_count": 40, "digg_article_count": 56, "got_digg_count": 314, "got_view_count": 19642, "post_shortmsg_count": 17, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 510, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546518, "tag_id": "6809640396788858887", "tag_name": "Docker", "color": "#344D56", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/1265c034d36735225ac5.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432604595, "mtime": 1631684954, "id_type": 9, "tag_alias": "", "post_article_count": 5601, "concern_user_count": 134765}], "user_interact": {"id": 6993951401952935972, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7004735253755478029", "article_info": {"article_id": "7004735253755478029", "user_id": "1310273590265821", "category_id": "6809637767543259144", "tag_ids": 
[6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/4ccff10900f74fc2a621a83c6bd9fa71~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "必须知道的JavaScript String原生方法", "brief_content": "在JavaScript中,字符串的原生方法并不多,所以字符串操作是每个前端必须掌握的,面试的时候也会被重点考察。 创建字符串 静态方法 fromCharCode() 将Unicode编码转换为字符 f", "is_english": 0, "is_original": 1, "user_index": 2.304290107072768, "original_type": 0, "original_author": "", "content": "", "ctime": "1630917063", "mtime": "1630982329", "rtime": "1630982329", "draft_id": "7004650821619023880", "view_count": 50, "collect_count": 2, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00578523, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1310273590265821", "user_name": "傲夫靠斯", "company": "", "job_title": "公号 @ 前端工程师的自我修养", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/745930fdd28cb84dfe523839ebd57483~300x300.image", "level": 2, "description": "个人微信:cmdfas 备注来源[掘金]", "followee_count": 4, "follower_count": 17, "post_article_count": 21, "digg_article_count": 11, "got_digg_count": 106, "got_view_count": 5030, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 156, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7004735253755478029, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6998042582865412110", "article_info": {"article_id": "6998042582865412110", "user_id": "3737995267806766", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640699667939342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "什么是函数式编程?", "brief_content": "什么是函数式编程? 
函数式编程(Function Programming,简写 FP ),和面对对象编程,面向过程编程一样,是一种编程范式.", "is_english": 0, "is_original": 1, "user_index": 3.969362295916118, "original_type": 0, "original_author": "", "content": "", "ctime": "1629358775", "mtime": "1629442663", "rtime": "1629442663", "draft_id": "6998040943681093640", "view_count": 249, "collect_count": 1, "digg_count": 7, "comment_count": 2, "hot_index": 21, "is_hot": 0, "rank_index": 0.00578427, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3737995267806766", "user_name": "南园游", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/12/16/16f0cadb85cdfad9~tplv-t2oaga2asx-image.image", "level": 1, "description": "", "followee_count": 5, "follower_count": 2, "post_article_count": 4, "digg_article_count": 8, "got_digg_count": 13, "got_view_count": 925, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 22, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546737, "tag_id": "6809640699667939342", "tag_name": "函数式编程", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/75f57f953f13200a7e6a.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1480964210, "mtime": 1631669923, "id_type": 9, "tag_alias": "", "post_article_count": 946, "concern_user_count": 35874}], "user_interact": {"id": 6998042582865412110, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7001134628916428836", "article_info": {"article_id": "7001134628916428836", "user_id": "378645226199581", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/dc4d59ada990432b924ca8ab025a829a~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "TCP三次握手与四次挥手,TCP/IP,UDP和TCP区别全在这里了", "brief_content": "TCP位于传输层,作用是提供可靠的字节流服务,为了准确无误地将数据送达目的地,TCP协议采纳三次握手策略", "is_english": 0, "is_original": 1, "user_index": 4.508716229440012, "original_type": 0, "original_author": "", "content": "", "ctime": "1630078718", "mtime": "1630546498", "rtime": "1630131665", "draft_id": "7001122333259726861", "view_count": 118, "collect_count": 1, "digg_count": 4, 
"comment_count": 2, "hot_index": 11, "is_hot": 0, "rank_index": 0.00578299, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "378645226199581", "user_name": "jojo的奇妙前端", "company": "Dio的面包屋", "job_title": "面包师", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/694be53d2340f5910447b65e31b96684~300x300.image", "level": 1, "description": "", "followee_count": 3, "follower_count": 11, "post_article_count": 13, "digg_article_count": 28, "got_digg_count": 65, "got_view_count": 1788, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 82, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7001134628916428836, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6993990148752932872", "article_info": {"article_id": "6993990148752932872", "user_id": "1486195453595736", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p9-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8d3665f2b80346239177c80c2b3c8959~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "vue中的key到底有什么用?", "brief_content": "key是什么 在vue中,我们经常使用的指令其中必定有key,我们先看看vue的官网文档中是怎么定义key这个指令的 vue使用的虚拟dom,不直接操作dom元素,在操作虚拟dom的时候又使用了dif", "is_english": 0, "is_original": 1, "user_index": 5.643386594270234, "original_type": 0, "original_author": "", "content": "", "ctime": "1628415285", "mtime": "1628415793", "rtime": "1628415793", "draft_id": "6993599238328287263", "view_count": 447, "collect_count": 0, "digg_count": 13, "comment_count": 0, "hot_index": 35, "is_hot": 0, "rank_index": 0.00578088, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1486195453595736", "user_name": "HavanaLee", "company": "", "job_title": "前端摸鱼工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/5bd836af021fb14b274fd87909535a7e~300x300.image", "level": 2, "description": "", "followee_count": 14, "follower_count": 7, "post_article_count": 10, "digg_article_count": 154, "got_digg_count": 77, "got_view_count": 3018, "post_shortmsg_count": 0, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 107, "study_point": 0, "university": 
{"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 1631692660, "id_type": 9, "tag_alias": "", "post_article_count": 31256, "concern_user_count": 313520}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6993990148752932872, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "6844904201672196110", "article_info": {"article_id": "6844904201672196110", "user_id": "712139233840407", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "https://juejin.im/post/6844904201672196110", "cover_image": "", "is_gfw": 0, "title": "总感觉自己不会的太多了,不知该如何下手?", "brief_content": "前端东西确实蛮多,但也没必要什么都想学。一旦你有这个想法,多半会像个无头苍蝇乱飞。这个看看,那个学点,到头来啥东西都没学好。 这样的例子其实我在读者里看到好些了,学习确实看起来是在学习,啥资料都收藏了,今天看会这个技术的视频,明天拿上另一个技术的书读起来,但是这种学习方式相当低…", "is_english": 0, "is_original": 1, "user_index": 0.53262452509886, "original_type": 0, "original_author": "", "content": "", "ctime": "1593400023", "mtime": "1599038787", "rtime": "1593400023", "draft_id": "6845076841087107080", "view_count": 14283, "collect_count": 124, "digg_count": 229, "comment_count": 48, "hot_index": 991, "is_hot": 0, "rank_index": 0.00577923, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "712139233840407", "user_name": "yck", "company": "「前端真好玩」公众号作者", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/a386aa8db73c9678458ec34161472ca5~300x300.image", "level": 7, "description": "", "followee_count": 21, "follower_count": 34610, "post_article_count": 84, "digg_article_count": 105, "got_digg_count": 45060, "got_view_count": 1475733, "post_shortmsg_count": 12, "digg_shortmsg_count": 7, "isfollowed": false, "favorable_author": 1, "power": 59614, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 98, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, 
"category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631692583, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398956}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 6844904201672196110, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7002541621828911117", "article_info": {"article_id": "7002541621828911117", "user_id": "4195392104175527", "category_id": "6809637767543259144", "tag_ids": [6809640357354012685, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "https://p3-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/33623236cdd441b1be41706d227e4fa6~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "从源码理解setState(React15.6.0)", "brief_content": "React:setState执行机制,setState同步异步表现形式成因,setState连续设置对象参数只生效最后一次设置问题原因", "is_english": 0, "is_original": 1, "user_index": 4.889445618977261, "original_type": 0, "original_author": "", "content": "", "ctime": "1630406325", "mtime": "1630588778", "rtime": "1630473914", "draft_id": "7002485527974772744", "view_count": 130, "collect_count": 0, "digg_count": 0, "comment_count": 0, "hot_index": 6, "is_hot": 0, "rank_index": 0.00577807, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4195392104175527", "user_name": "Tsuki_", "company": "苞米", "job_title": "more than two years", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/99973816f15115ba1f9437e862674257~300x300.image", "level": 2, "description": "没有翻不过的沟 只是你努力还不够", "followee_count": 10, "follower_count": 73, "post_article_count": 25, "digg_article_count": 43, "got_digg_count": 91, "got_view_count": 8933, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 180, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, 
"show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, "tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}], "user_interact": {"id": 7002541621828911117, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}, {"article_id": "7001409580890587172", "article_info": {"article_id": "7001409580890587172", "user_id": "2928754707141608", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640394175971342], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "CSS动画-调速函数 | steps与帧动画", "brief_content": "这是我参与8月更文挑战的第28天,活动详情查看:8月更文挑战 在文章CSS动画-调速函数一文中,我们初步了解了一下CSS调速函数animation-timing-function的作用,介绍了一个重要", "is_english": 0, "is_original": 1, "user_index": 3.881363120879044, "original_type": 0, "original_author": "", "content": "", "ctime": "1630142986", "mtime": "1630309636", "rtime": "1630309636", "draft_id": "7001400277320499230", "view_count": 138, "collect_count": 1, "digg_count": 3, "comment_count": 0, "hot_index": 9, "is_hot": 0, "rank_index": 0.0057777, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2928754707141608", "user_name": "KevinQ", "company": "某国企", "job_title": "全干工程师", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/985fdb8019434c98a2d1ef549dc59fef~300x300.image", "level": 2, "description": "啥都会一点儿的后端coder", "followee_count": 111, "follower_count": 35, "post_article_count": 102, "digg_article_count": 181, "got_digg_count": 339, "got_view_count": 23803, "post_shortmsg_count": 274, "digg_shortmsg_count": 449, "isfollowed": false, "favorable_author": 0, "power": 507, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631692835, "id_type": 9, 
"tag_alias": "", "post_article_count": 88828, "concern_user_count": 527704}, {"id": 2546516, "tag_id": "6809640394175971342", "tag_name": "CSS", "color": "#244DE4", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/66de0c4eb9d10130d5bf.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432239426, "mtime": 1631688735, "id_type": 9, "tag_alias": "", "post_article_count": 14981, "concern_user_count": 297034}], "user_interact": {"id": 7001409580890587172, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "20210915160509010204026215010001E4"}], "cursor": "eyJ2IjoiNzAwNzk5MTg0ODMwODMxMDAyNCIsImkiOjQ3NjB9", "count": 34210, "has_more": true} | [
"[email protected]"
] | |
06bd77a00c108cd3162f43c0b8c735e395c7c330 | a12a4be7e8c792b4c1f2765d3e7a43056e9196b0 | /399-evaluate-division/399-evaluate-division.py | 317cbb08733f23f1593c0c5e5836a04b160ea65c | [] | no_license | fdas3213/Leetcode | d4b7cfab70446b3f6a961252a55b36185bc87712 | 1335d5759c41f26eb45c8373f33ee97878c4a638 | refs/heads/master | 2022-05-28T16:24:15.856679 | 2022-05-19T21:56:35 | 2022-05-19T21:56:35 | 94,024,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,198 | py | class Solution:
    def calcEquation(self, equations: 'List[List[str]]', values: 'List[float]', queries: 'List[List[str]]') -> 'List[float]':
        # string annotations + local imports keep this file runnable outside the
        # LeetCode harness (which normally provides typing.List for us)
        from collections import defaultdict, deque
        # step 1. initialize the graph: graph[a][b] holds the ratio a / b
        graph = defaultdict(dict)
for pair, value in zip(equations, values):
v1, v2 = pair[0], pair[1]
graph[v1][v2] = value
graph[v2][v1] = 1/value
def evaluate(cur_node, target_node, product, visited):
visited.add(cur_node)
val = -1
neighbors = graph[cur_node]
if target_node in neighbors:
return product * neighbors[target_node]
else:
for neighbor, value in neighbors.items():
if neighbor not in visited:
val = evaluate(neighbor, target_node, product*value, visited)
if val != -1:
break
visited.remove(cur_node)
return val
def evaluate_bfs(cur_node, target_node):
visited = set()
queue = deque([(cur_node, 1)])
while queue:
cur_node, cur_val = queue.popleft()
visited.add(cur_node)
if target_node == cur_node:
return cur_val
neighbors = graph[cur_node]
for neighbor, val in neighbors.items():
if neighbor not in visited:
queue.append((neighbor, cur_val*val))
return -1
#step 2. evaluate the query
res = []
for n1,n2 in queries:
            #if either node does not exist in the graph, the query is unanswerable
if n1 not in graph or n2 not in graph:
res.append(-1)
continue
            #if n1 and n2 are the same node
if n1 == n2:
res.append(1)
continue
#dfs
visited = set()
res.append(evaluate(n1, n2, 1, visited))
#bfs: res.append(evaluate_bfs(n1, n2))
return res
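# --- editor's note: minimal driver sketch, not part of the original solution. ---
# Inputs follow LeetCode 399; with a/b = 2.0 and b/c = 3.0 the code above
# returns [6.0, 0.5, -1, 1, -1] (unanswerable queries yield -1, x/x yields 1).
if __name__ == "__main__":
    equations = [["a", "b"], ["b", "c"]]
    values = [2.0, 3.0]
    queries = [["a", "c"], ["b", "a"], ["a", "e"], ["a", "a"], ["x", "x"]]
    print(Solution().calcEquation(equations, values, queries))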
| [
"[email protected]"
] | |
4749db7324c75666dd8e25a25566092e3b09963e | dd3bbd4e7aaee7a8a5f26b927ce28ac472c855a5 | /eggs/plone.app.kss-1.6.2-py2.7.egg/plone/app/kss/demo/bbb_oldkssdemo.py | 112c7267a425cbc4ada948bfe06706153dd4619d | [] | no_license | nacho22martin/tesis | ea0a822f8bdbdef6f13f41276ecd4d6e85427ca5 | e137eb6225cc5e724bee74a892567796166134ac | refs/heads/master | 2020-12-24T13:20:58.334839 | 2013-11-09T12:42:41 | 2013-11-09T12:42:41 | 14,261,570 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,315 | py |
# XXX future BBB
# Provide a way for code written against the old kss.demo version not to
# fail with an ImportError - even if it cannot execute these tests.
# This lets the package contain application-level test setup while still
# not failing with the old version.
try:
import kss.demo
from kss.demo import (
KSSSeleniumTestDirectory,
KSSDemo,
KSSSeleniumTestCase,
KSSSeleniumTestSuite,
KSSSeleniumTestLayerBase,
KSSSeleniumSandboxCreationTestCase,
)
except ImportError:
    # Stand-ins for constructs missing from the old version. They will not
    # do anything useful, but they import and run without errors.
class Fake(object):
# test_directory is needed because the caller code
# will treat us as a TestDirectory. So, we give a
# directory that does not contain any *.html files.
test_directory = '/'
def __init__(self, *arg, **kw):
pass
#
import kss.demo.resource
# Provide the classes directly on kss.demo namespace
kss.demo.KSSSeleniumTestDirectory = kss.demo.resource.KSSSeleniumTestDirectory
kss.demo.KSSDemo = kss.demo.resource.KSSDemo
kss.demo.KSSSeleniumTestCase = Fake
kss.demo.KSSSeleniumTestSuite = Fake
kss.demo.KSSSeleniumTestLayerBase = Fake
kss.demo.KSSSeleniumSandboxCreationTestCase = Fake
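# --- editor's note: illustrative sketch, not in the original module. ---
# With the shim above in place, caller code written against the newer
# kss.demo API keeps importing cleanly on the old version, e.g.:
#
#     from kss.demo import KSSSeleniumTestSuite   # the Fake class here
#     suite = KSSSeleniumTestSuite()               # accepts any args, does nothing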
| [
"ignacio@plone.(none)"
] | ignacio@plone.(none) |
a14daf25d28db1dfa5c33f566606bc651a65b733 | 81539aba88c22cf75bd2e14f5e0e92f2bf54e962 | /DarkMatterMap2017/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_Mchi-1_Mphi-500_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV_madgraph_mcatnlo_pythia8_30000_2_cff.py | ef4683daac65cf865ef31df25cfc4909e6363974 | [] | no_license | nistefan/RandomizedParametersSeparator | ad35b48b95e9745814c0bf9d8d8b6eb8aa479177 | 66a0e291b59113c6b5301768f1c10e36cf23d3c3 | refs/heads/master | 2021-01-03T00:41:17.415005 | 2020-02-19T13:30:54 | 2020-02-19T13:30:54 | 239,838,928 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,904 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, lumisToProcess = cms.untracked.VLuminosityBlockRange(*('1:36371', '1:38472', '1:38822', '1:36982', '1:37704', '1:36037', '1:37534', '1:36568', '1:37660', '1:37478', '1:37731', '1:37801', '1:38380', '1:38969', '1:38974', '1:38986', '1:37623', '1:36199', '1:36294', '1:36484', '1:38278', '1:38661', '1:38684', '1:36313', '1:36435', '1:37491', '1:38628', '1:38196', '1:38329', '1:38866', '1:36066', '1:36523', '1:38614', '1:38532', '1:38601', '1:37893', '1:37009', '1:38498', '1:36354', '1:36942', '1:36763', '1:37334', '1:37896', '1:37001', '1:37480', '1:36334', '1:36879', '1:36339', '1:36299', '1:36303', '1:36896', '1:36903', '1:36153', '1:38597', '1:38880', '1:38946', '1:38952', '1:38953', '1:38273', '1:38703', '1:38771', ))
)
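# editor's note (assumption, not in the generated fragment): this _cff file is
# meant to be imported by a cmsRun configuration, which picks up `source`,
# `maxEvents` and `readFiles` as its input definition; the lumi mask above
# limits processing to the listed luminosity blocks.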
readFiles.extend( ['/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/D0F8A41C-990E-EA11-A50D-AC1F6B1E3074.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/DCD9D649-2013-EA11-BBCA-98039B3B0032.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/88D5C4F6-9D0C-EA11-A344-24BE05C63681.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/14F851FE-6510-EA11-B76F-008CFAF74B22.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/2AC7749D-2013-EA11-AAAA-001E67792738.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/8228DED2-F60C-EA11-9D11-002590FD5A78.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/B4730441-2013-EA11-8532-0242AC130002.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/98375E4E-2013-EA11-BDE2-7CD30AD095BC.root', '/store/mc/RunIIFall17MiniAODv2/TTbarDMJets_Dilepton_pseudoscalar_LO_TuneCP5_13TeV-madgraph-mcatnlo-pythia8/MINIAODSIM/PU2017_12Apr2018_rp_94X_mc2017_realistic_v14-v1/30000/86D1A249-EF0D-EA11-BAF6-008CFAE45108.root']); | [
"[email protected]"
] | |
e12d9c7779e15c081580d82cfaaf33c753eba8e5 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /2gFkEsAqNZrs4yeck_5.py | 1deffbbc4273056ca01743885242d8e47d869034 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 66 | py |
mini_peaks = lambda l: [y for x, y, z in zip(l, l[1:], l[2:]) if x < y > z]  # strict interior peaks
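# editor's note: illustrative check, not in the original file -
# mini_peaks([4, 5, 2, 1, 4, 9, 7, 2]) evaluates to [5, 9].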
| [
"[email protected]"
] | |
10688edc40347097c51ecda235be420e4c48ecaa | 2bcc421ee345b00cf805c543b37d18b5d019dc04 | /adafruit-circuitpython-bundle-6.x-mpy-20201126/examples/led_animation_group.py | 011a019ee803683432760c85a49cbbacb6bfd77c | [] | no_license | saewoonam/sc-current-source-titano | 5a1ad46889c1b09c168424901fd71cb4eab5c61b | 1c136aa8b61268d9ac0b5a682b30ece70ab87663 | refs/heads/main | 2023-03-02T22:12:26.685537 | 2021-02-09T03:28:01 | 2021-02-09T03:28:01 | 317,299,900 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | """
This example shows three different ways to use AnimationGroup: syncing two animations, displaying
two animations at different speeds, and displaying two animations sequentially, across two separate
pixel objects such as the built-in NeoPixels on a Circuit Playground Bluefruit and a NeoPixel strip.
This example is written for Circuit Playground Bluefruit and a 30-pixel NeoPixel strip connected to
pad A1. It does not work on Circuit Playground Express.
"""
import board
import neopixel
from adafruit_circuitplayground import cp
from adafruit_led_animation.animation.blink import Blink
from adafruit_led_animation.animation.comet import Comet
from adafruit_led_animation.animation.chase import Chase
from adafruit_led_animation.group import AnimationGroup
from adafruit_led_animation.sequence import AnimationSequence
import adafruit_led_animation.color as color
strip_pixels = neopixel.NeoPixel(board.A1, 30, brightness=0.5, auto_write=False)
cp.pixels.brightness = 0.5
animations = AnimationSequence(
# Synchronized to 0.5 seconds. Ignores the second animation setting of 3 seconds.
AnimationGroup(
Blink(cp.pixels, 0.5, color.CYAN),
Blink(strip_pixels, 3.0, color.AMBER),
sync=True,
),
# Different speeds
AnimationGroup(
Comet(cp.pixels, 0.1, color.MAGENTA, tail_length=5),
Comet(strip_pixels, 0.01, color.MAGENTA, tail_length=15),
),
# Different animations
AnimationGroup(
Blink(cp.pixels, 0.5, color.JADE),
Comet(strip_pixels, 0.05, color.TEAL, tail_length=15),
),
# Sequential animations on the built-in NeoPixels then the NeoPixel strip
Chase(cp.pixels, 0.05, size=2, spacing=3, color=color.PURPLE),
Chase(strip_pixels, 0.05, size=2, spacing=3, color=color.PURPLE),
advance_interval=3.0,
auto_clear=True,
auto_reset=True,
)
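# editor's note (per the adafruit_led_animation docs): advance_interval=3.0
# moves to the next sequence member every three seconds, auto_clear turns the
# pixels off between members, and auto_reset restarts each animation's state.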
while True:
animations.animate()
| [
"[email protected]"
] | |
e6aa1fc31893a65606e16abf84d605a55a52173a | e5a20362b2f9b17055cb95d56dc8dea2059205fb | /arrays_manipulations_algorithms/is_str_equal.py | 4c1d539801e17f907e0371055984660ed94ffd56 | [] | no_license | uchenna-j-edeh/dailly_problems | 0c97d1ab3c91756abf625a04e3bb6e0cd6e3405c | 7bd47232704297851f8acdd9331f90da96c732af | refs/heads/master | 2023-08-17T12:27:00.640834 | 2023-08-07T17:03:00 | 2023-08-07T17:03:00 | 158,981,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | # write a code to check if two str are equal
def is_equal(s1, s2):
    # original condition was inverted (len(s2) - 1 >= i returned False for
    # every in-range index); an index past the other string's end means the
    # lengths differ, so the strings cannot be equal
    for i in range(len(s1)):
        if i > len(s2) - 1 or (s1[i].lower() != s2[i].lower()):
            return False
    for i in range(len(s2)):
        if i > len(s1) - 1 or (s1[i].lower() != s2[i].lower()):
            return False
    return True
s1 = "abcd" # 4
s2 = "ABCDj" # 5
print(is_equal(s1, s2)) | [
"[email protected]"
] | |
edd919cfe5efef37c9386e8f94227f5bb2b80185 | 09ba5ae2edc51f3fd812b9205188b1b01e6bea77 | /test/src/CPMel/core/metaclass.py | 61cec13411e956af6f67e786d3014ce281188ff7 | [] | no_license | cpcgskill/Maya_tools | c6a43ad20eab3b97e82c9dfe40a1745b6098e5c4 | 93f9e66e5dc3bb51f33df0615415a56a60613ff1 | refs/heads/main | 2023-02-26T16:20:52.959050 | 2021-01-28T06:12:18 | 2021-01-28T06:12:18 | 325,512,423 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 854 | py | #!/usr/bin/python
# -*-coding:utf-8 -*-
u"""
:创建时间: 2020/5/18 23:57
:作者: 苍之幻灵
:我的主页: https://cpcgskill.com
:QQ: 2921251087
:爱发电: https://afdian.net/@Phantom_of_the_Cang
:aboutcg: https://www.aboutcg.org/teacher/54335
:bilibili: https://space.bilibili.com/351598127
"""
import functools
def newClass(name, bases, attrs):
u"""
构建元类使用此元类的类在创建时自动创建对象
:param name:
:param bases:
:param attrs:
:return:
"""
cls = type(name, bases, attrs)
return cls()
def createClass(name, bases, attrs):
u"""
创建器元类
以此为元类的类在创建时将不会自动调用__init__
:param name:
:param bases:
:param attrs:
:return:
"""
cls = type(name, bases, attrs)
return functools.partial(cls.__new__, cls)
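# --- editor's note: illustrative usage sketch, not in the original module ---
# (Python 2 syntax, matching this module's Maya-era target):
#
#     class Tool(object):
#         __metaclass__ = newClass     # the class statement yields an instance
#
#     class Node(object):
#         __metaclass__ = createClass  # Node() builds via __new__, skips __init__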
| [
"www.cpcgskill.com"
] | www.cpcgskill.com |
053b8628f236c89b6e4071334424c1a57a3c1d50 | 5cbde24d02eea9e762994af976aff8b4fdc731b3 | /actus/wsgi.py | 657d6f36c1718379520c54b937aa9fb42599d2c5 | [] | no_license | paulo-romano/actus | f94e874ef3351181c79539ba69df9f7bbdb9e90f | d424afa6672f6f714f094b2080d0255bad257268 | refs/heads/master | 2021-01-17T15:06:40.486493 | 2016-12-17T00:47:03 | 2016-12-17T00:59:53 | 70,018,546 | 2 | 1 | null | 2016-11-04T00:05:47 | 2016-10-05T00:41:06 | JavaScript | UTF-8 | Python | false | false | 424 | py | """
WSGI config for actus project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "actus.settings")
application = Cling(get_wsgi_application())
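# editor's note: Cling (from dj-static) wraps the WSGI application so static
# files are served by the app process itself - the usual reason to use it on
# Heroku-style deployments.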
| [
"[email protected]"
] | |
a8e18dcbe6113a775bc2a7239cc76ff8420db740 | 3fb0ce33f00b96ae3808a32da44de3e887434afb | /.提出一覧/AtCoder/ABC156/b/main.py | 54120439e9bf75e86574bad0f396250ddd7c9bf0 | [] | no_license | Yukikazari/kyoupuro | ca3d74d8db024b1988cd0ff00bf069ab739783d7 | 343de455c4344dbcfa4524b492f7f6205c9db26f | refs/heads/master | 2023-02-21T01:53:52.403729 | 2021-01-27T03:55:01 | 2021-01-27T03:55:01 | 282,222,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | #!/usr/bin/env python3
#import
#import math
#import numpy as np
#= int(input())
#= input()
# ABC156 B: print the number of digits of N written in base K,
# i.e. the smallest i with K ** i > N
N, K = map(int, input().split())
for i in range(1, 10 ** 6):
if K ** i > N:
print(i)
exit()
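# editor's note (illustration, not in the original submission): for input
# "11 2" this prints 4, since 2**4 = 16 > 11 while 2**3 = 8 <= 11 - i.e.
# 11 has 4 digits in base 2 (0b1011).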
| [
"[email protected]"
] | |
93077dc4d63732f42922d4c942ec5ed4352f5da7 | bc441bb06b8948288f110af63feda4e798f30225 | /patch_manager_sdk/api/patch_task/list_task_pb2.py | 4516260317484520a501c4388802ba9301b167a4 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 15,278 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: list_task.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='list_task.proto',
package='patch_task',
syntax='proto3',
serialized_options=None,
serialized_pb=_b('\n\x0flist_task.proto\x12\npatch_task\"[\n\x14ListPatchTaskRequest\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x11\n\tstartTime\x18\x03 \x01(\x05\x12\x0f\n\x07\x65ndTime\x18\x04 \x01(\x05\"\x81\x03\n\x15ListPatchTaskResponse\x12\x0c\n\x04page\x18\x01 \x01(\x05\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\r\n\x05total\x18\x03 \x01(\x05\x12\x34\n\x04list\x18\x04 \x03(\x0b\x32&.patch_task.ListPatchTaskResponse.List\x1a\x81\x02\n\x04List\x12\x0e\n\x06taskId\x18\x01 \x01(\t\x12?\n\x07request\x18\x02 \x03(\x0b\x32..patch_task.ListPatchTaskResponse.List.Request\x12\x0f\n\x07\x63reator\x18\x03 \x01(\t\x12\r\n\x05\x63time\x18\x04 \x01(\x05\x12\r\n\x05\x65time\x18\x05 \x01(\x05\x12\x0e\n\x06status\x18\x06 \x01(\t\x12\x11\n\tgroupSize\x18\x07 \x01(\x05\x12\x16\n\x0eprocessedCount\x18\x08 \x01(\x05\x1a>\n\x07Request\x12\x0e\n\x06hostId\x18\x01 \x01(\t\x12\x0e\n\x06hostIp\x18\x02 \x01(\t\x12\x13\n\x0bpatchIdList\x18\x03 \x03(\t\"\x81\x01\n\x1cListPatchTaskResponseWrapper\x12\x0c\n\x04\x63ode\x18\x01 \x01(\x05\x12\x13\n\x0b\x63odeExplain\x18\x02 \x01(\t\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12/\n\x04\x64\x61ta\x18\x04 \x01(\x0b\x32!.patch_task.ListPatchTaskResponseb\x06proto3')
)
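# editor's note (sketch, not generated output): once the full module is on the
# path, client code would typically build messages like
#   from patch_manager_sdk.api.patch_task import list_task_pb2
#   req = list_task_pb2.ListPatchTaskRequest(page=1, page_size=20)
#   data = req.SerializeToString()
# (the message classes themselves are defined further down in this file).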
_LISTPATCHTASKREQUEST = _descriptor.Descriptor(
name='ListPatchTaskRequest',
full_name='patch_task.ListPatchTaskRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='patch_task.ListPatchTaskRequest.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='patch_task.ListPatchTaskRequest.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='startTime', full_name='patch_task.ListPatchTaskRequest.startTime', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='endTime', full_name='patch_task.ListPatchTaskRequest.endTime', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=31,
serialized_end=122,
)
_LISTPATCHTASKRESPONSE_LIST_REQUEST = _descriptor.Descriptor(
name='Request',
full_name='patch_task.ListPatchTaskResponse.List.Request',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='hostId', full_name='patch_task.ListPatchTaskResponse.List.Request.hostId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='hostIp', full_name='patch_task.ListPatchTaskResponse.List.Request.hostIp', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='patchIdList', full_name='patch_task.ListPatchTaskResponse.List.Request.patchIdList', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=448,
serialized_end=510,
)
_LISTPATCHTASKRESPONSE_LIST = _descriptor.Descriptor(
name='List',
full_name='patch_task.ListPatchTaskResponse.List',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='taskId', full_name='patch_task.ListPatchTaskResponse.List.taskId', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='request', full_name='patch_task.ListPatchTaskResponse.List.request', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='creator', full_name='patch_task.ListPatchTaskResponse.List.creator', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='ctime', full_name='patch_task.ListPatchTaskResponse.List.ctime', index=3,
number=4, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='etime', full_name='patch_task.ListPatchTaskResponse.List.etime', index=4,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='patch_task.ListPatchTaskResponse.List.status', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='groupSize', full_name='patch_task.ListPatchTaskResponse.List.groupSize', index=6,
number=7, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='processedCount', full_name='patch_task.ListPatchTaskResponse.List.processedCount', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTPATCHTASKRESPONSE_LIST_REQUEST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=253,
serialized_end=510,
)
_LISTPATCHTASKRESPONSE = _descriptor.Descriptor(
name='ListPatchTaskResponse',
full_name='patch_task.ListPatchTaskResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page', full_name='patch_task.ListPatchTaskResponse.page', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='patch_task.ListPatchTaskResponse.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total', full_name='patch_task.ListPatchTaskResponse.total', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='list', full_name='patch_task.ListPatchTaskResponse.list', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_LISTPATCHTASKRESPONSE_LIST, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=125,
serialized_end=510,
)
_LISTPATCHTASKRESPONSEWRAPPER = _descriptor.Descriptor(
name='ListPatchTaskResponseWrapper',
full_name='patch_task.ListPatchTaskResponseWrapper',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='patch_task.ListPatchTaskResponseWrapper.code', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='codeExplain', full_name='patch_task.ListPatchTaskResponseWrapper.codeExplain', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='error', full_name='patch_task.ListPatchTaskResponseWrapper.error', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='patch_task.ListPatchTaskResponseWrapper.data', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=513,
serialized_end=642,
)
_LISTPATCHTASKRESPONSE_LIST_REQUEST.containing_type = _LISTPATCHTASKRESPONSE_LIST
_LISTPATCHTASKRESPONSE_LIST.fields_by_name['request'].message_type = _LISTPATCHTASKRESPONSE_LIST_REQUEST
_LISTPATCHTASKRESPONSE_LIST.containing_type = _LISTPATCHTASKRESPONSE
_LISTPATCHTASKRESPONSE.fields_by_name['list'].message_type = _LISTPATCHTASKRESPONSE_LIST
_LISTPATCHTASKRESPONSEWRAPPER.fields_by_name['data'].message_type = _LISTPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['ListPatchTaskRequest'] = _LISTPATCHTASKREQUEST
DESCRIPTOR.message_types_by_name['ListPatchTaskResponse'] = _LISTPATCHTASKRESPONSE
DESCRIPTOR.message_types_by_name['ListPatchTaskResponseWrapper'] = _LISTPATCHTASKRESPONSEWRAPPER
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListPatchTaskRequest = _reflection.GeneratedProtocolMessageType('ListPatchTaskRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTPATCHTASKREQUEST,
'__module__' : 'list_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.ListPatchTaskRequest)
})
_sym_db.RegisterMessage(ListPatchTaskRequest)
ListPatchTaskResponse = _reflection.GeneratedProtocolMessageType('ListPatchTaskResponse', (_message.Message,), {
'List' : _reflection.GeneratedProtocolMessageType('List', (_message.Message,), {
'Request' : _reflection.GeneratedProtocolMessageType('Request', (_message.Message,), {
'DESCRIPTOR' : _LISTPATCHTASKRESPONSE_LIST_REQUEST,
'__module__' : 'list_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.ListPatchTaskResponse.List.Request)
})
,
'DESCRIPTOR' : _LISTPATCHTASKRESPONSE_LIST,
'__module__' : 'list_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.ListPatchTaskResponse.List)
})
,
'DESCRIPTOR' : _LISTPATCHTASKRESPONSE,
'__module__' : 'list_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.ListPatchTaskResponse)
})
_sym_db.RegisterMessage(ListPatchTaskResponse)
_sym_db.RegisterMessage(ListPatchTaskResponse.List)
_sym_db.RegisterMessage(ListPatchTaskResponse.List.Request)
ListPatchTaskResponseWrapper = _reflection.GeneratedProtocolMessageType('ListPatchTaskResponseWrapper', (_message.Message,), {
'DESCRIPTOR' : _LISTPATCHTASKRESPONSEWRAPPER,
'__module__' : 'list_task_pb2'
# @@protoc_insertion_point(class_scope:patch_task.ListPatchTaskResponseWrapper)
})
_sym_db.RegisterMessage(ListPatchTaskResponseWrapper)
# @@protoc_insertion_point(module_scope)
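# ---------------------------------------------------------------------------
# Hedged usage sketch (hand-written, not generator output). It only exercises
# the two ListPatchTaskRequest fields visible above (`startTime`, `endTime`);
# the message may define further fields.
if __name__ == '__main__':
    _req = ListPatchTaskRequest(startTime=0, endTime=3600)
    _payload = _req.SerializeToString()   # protobuf wire-format bytes
    _roundtrip = ListPatchTaskRequest()
    _roundtrip.ParseFromString(_payload)  # lossless round-trip
    assert _roundtrip.endTime == 3600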
| [
"[email protected]"
] | |
263d88bc1127e17bd9788a19259e6a996b95f48f | 8ce2b8314fd2e11f3118f7b57f15d1aeb661eec9 | /backend/bagel_buoy_1801/settings.py | e5641f87d26096e7d8b6da1fdacf8134eb1580ba | [] | no_license | crowdbotics-apps/bagel-buoy-1801 | 2b4f17b3ea8f56fc574f01736900a9d15a216ca8 | d053cf93ff55a0dab5d6af49a6351fe740022ac0 | refs/heads/master | 2022-12-09T00:33:29.351845 | 2019-03-30T22:14:51 | 2019-03-30T22:14:51 | 178,616,820 | 0 | 0 | null | 2022-12-03T04:13:29 | 2019-03-30T22:14:47 | JavaScript | UTF-8 | Python | false | false | 4,758 | py | """
Django settings for bagel_buoy_1801 project.
Generated by 'django-admin startproject' using Django 1.11.16.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
env = environ.Env()
environ.Env.read_env(os.path.join(BASE_DIR, '.env'))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool('DEBUG', default=True)
ALLOWED_HOSTS = ['*']
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
]
LOCAL_APPS = [
'home',
]
THIRD_PARTY_APPS = [
'rest_framework',
'rest_framework.authtoken',
'bootstrap4',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
]
ROOT_URLCONF = 'bagel_buoy_1801.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bagel_buoy_1801.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bagel_buoy_1801',
'USER': 'bagel_buoy_1801',
'PASSWORD': 'bagel_buoy_1801',
'HOST': 'localhost',
'PORT': '5432',
}
}
if env.str('DATABASE_URL', default=None):
DATABASES = {
'default': env.db()
}
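# Example value (illustrative only; django-environ's env.db() parses the
# DATABASE_URL environment variable):
# DATABASE_URL=postgres://bagel_buoy_1801:bagel_buoy_1801@localhost:5432/bagel_buoy_1801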
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
AUTHENTICATION_BACKENDS = (
'django.contrib.auth.backends.ModelBackend',
'allauth.account.auth_backends.AuthenticationBackend'
)
# allauth
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = None
LOGIN_REDIRECT_URL = '/'
if DEBUG:
# output email to console instead of sending
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
EMAIL_HOST = 'smtp.sendgrid.net'
EMAIL_HOST_USER = env.str('SENDGRID_USERNAME', '')
EMAIL_HOST_PASSWORD = env.str('SENDGRID_PASSWORD', '')
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Import local settings
try:
from .local_settings import *
INSTALLED_APPS += DEBUG_APPS
except (ImportError, NameError):
    # local_settings (and its optional DEBUG_APPS list) may be absent
    pass
| [
"[email protected]"
] | |
6509b905b984eb3598af4a3d6006cd6728c0a5b0 | f114cd3da2ca11a8635e8f25c82e2cbbe4bf25c5 | /python/swexpert/sw2058.py | 8712ff9a1d5e263912097d0f2f76ef2e1b9650b6 | [] | no_license | mizm/TIL | 86641e0565e28b482148da84e98c4a32b90356de | 62da3fca85335f833a6f3462fd834cd87eb492c8 | refs/heads/master | 2021-06-11T08:11:18.670048 | 2021-04-19T02:09:17 | 2021-04-19T02:09:17 | 162,208,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | s = input()
# digit sum: add the decimal digits of the input number
print(sum(int(ch) for ch in s)) | [
"[email protected]"
] | |
d7600096286394a49b83fc56e6f04ee102c8d3b4 | 53e58c213232e02250e64f48b97403ca86cd02f9 | /16/mc/ExoDiBosonResonances/EDBRTreeMaker/test/crab3_analysisM3000_R_0-9.py | fa92ea4e8a878d40f96c26dc962f729463d22f78 | [] | no_license | xdlyu/fullRunII_ntuple_102X | 32e79c3bbc704cfaa00c67ab5124d40627fdacaf | d420b83eb9626a8ff1c79af5d34779cb805d57d8 | refs/heads/master | 2020-12-23T15:39:35.938678 | 2020-05-01T14:41:38 | 2020-05-01T14:41:38 | 237,192,426 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,303 | py | from WMCore.Configuration import Configuration
name = 'WWW/sig'
steam_dir = 'xulyu'
config = Configuration()
config.section_("General")
config.General.requestName = 'M3000_R0-9_off'
config.General.transferLogs = True
config.section_("JobType")
config.JobType.pluginName = 'Analysis'
config.JobType.inputFiles = ['Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFchs.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK8PFPuppi.txt','Summer16_07Aug2017_V11_MC_L1FastJet_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L2Relative_AK4PFPuppi.txt','Summer16_07Aug2017_V11_MC_L3Absolute_AK4PFPuppi.txt']
#config.JobType.inputFiles = ['PHYS14_25_V2_All_L1FastJet_AK4PFchs.txt','PHYS14_25_V2_All_L2Relative_AK4PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK4PFchs.txt','PHYS14_25_V2_All_L1FastJet_AK8PFchs.txt','PHYS14_25_V2_All_L2Relative_AK8PFchs.txt','PHYS14_25_V2_All_L3Absolute_AK8PFchs.txt']
# Name of the CMSSW configuration file
#config.JobType.psetName = 'bkg_ana.py'
config.JobType.psetName = 'analysis_sig.py'
#config.JobType.allowUndistributedCMSSW = True
config.JobType.allowUndistributedCMSSW = True
config.section_("Data")
#config.Data.inputDataset = '/WJetsToLNu_13TeV-madgraph-pythia8-tauola/Phys14DR-PU20bx25_PHYS14_25_V1-v1/MINIAODSIM'
config.Data.inputDataset = '/WkkToWRadionToWWW_M3000-R0-9-TuneCUETP8M1_13TeV-madgraph-pythia/RunIISummer16MiniAODv3-PUMoriond17_94X_mcRun2_asymptotic_v3-v1/MINIAODSIM'
#config.Data.inputDBS = 'global'
config.Data.inputDBS = 'global'
config.Data.splitting = 'FileBased'
config.Data.unitsPerJob =5
config.Data.totalUnits = -1
config.Data.publication = False
config.Data.outLFNDirBase = '/store/group/dpg_trigger/comm_trigger/TriggerStudiesGroup/STEAM/' + steam_dir + '/' + name + '/'
# This string is used to construct the output dataset name
config.Data.outputDatasetTag = 'M3000_R0-9_off'
config.section_("Site")
# Where the output files will be transmitted to
config.Site.storageSite = 'T2_CH_CERN'
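# Hedged usage note: with a valid grid proxy, this config would be submitted
# via the standard CRAB3 CLI, e.g.
#   crab submit -c crab3_analysisM3000_R_0-9.py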
| [
"[email protected]"
] | |
624480f7f2ed0cbdd5c554530d35447d513dcd1b | 02778455d6c88a4e83bbad836f4598d49ebe81e5 | /recipes/shared_logging/server.py | 70d9aafd35f8e98eb1fdfc2509c9dea385db9c5a | [
"MIT"
] | permissive | stjordanis/easyrpc | d703ad81e7c2a5cb83dab2e5a424baeea5d997c6 | 1c0d6f8c33aaf70ccf62d75777f5e4ca8c55fedc | refs/heads/main | 2023-08-13T05:52:18.459507 | 2021-10-13T20:15:44 | 2021-10-13T20:15:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | # central logging server
import logging
from fastapi import FastAPI
from easyrpc.server import EasyRpcServer
logging.basicConfig()
server = FastAPI()
@server.on_event('startup')
async def setup():
logger = logging.getLogger()
rpc_server = EasyRpcServer(server, '/ws/server', server_secret='abcd1234', debug=True)
rpc_server.register_logger(logger, namespace='logger') | [
"[email protected]"
] | |
ce0994b51ccee45b1b6cb2f4bcb1f11296c7c002 | 538833a15b119ca835b82886ca047dc25e71f134 | /app/bin/file/text_remove_duplicate.py | 76f01c83d8fd0252f6345e396e259a54a5368c1d | [] | no_license | buxizhizhoum/tool_scripts | 901ffb3749aa9521912636039bc897f969759d67 | d13b9217b4cde6b626451e9638d737911a0911c5 | refs/heads/master | 2021-01-01T15:39:01.396282 | 2018-12-11T06:53:29 | 2018-12-11T06:53:29 | 97,667,877 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
def text_remove_duplicate(original_file, processed_file):
    """Copy a text file, keeping only the first occurrence of each line."""
    seen = set()
    file_buffer = []
    with open(original_file, "r") as f:
        for line in f:
            # O(1) set lookup instead of an O(n) list scan for every line
            if line not in seen:
                seen.add(line)
                file_buffer.append(line)
    with open(processed_file, "w") as f:
        f.writelines(file_buffer)


if __name__ == "__main__":
    text_remove_duplicate("a.txt", "b.txt") | [
"[email protected]"
] | |
5cd33b20e5bc4c1c4b6e25e9df92b6fdc8d17e1a | 5ec06dab1409d790496ce082dacb321392b32fe9 | /clients/python/generated/swaggeraemosgi/model/com_adobe_granite_system_monitoring_impl_system_stats_m_bean_impl_info.py | 3e05ccd3726996c38d3fdaef1f4e610603a6ae96 | [
"Apache-2.0"
] | permissive | shinesolutions/swagger-aem-osgi | e9d2385f44bee70e5bbdc0d577e99a9f2525266f | c2f6e076971d2592c1cbd3f70695c679e807396b | refs/heads/master | 2022-10-29T13:07:40.422092 | 2021-04-09T07:46:03 | 2021-04-09T07:46:03 | 190,217,155 | 3 | 3 | Apache-2.0 | 2022-10-05T03:26:20 | 2019-06-04T14:23:28 | null | UTF-8 | Python | false | false | 7,630 | py | """
Adobe Experience Manager OSGI config (AEM) API
Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API # noqa: E501
The version of the OpenAPI document: 1.0.0-pre.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import nulltype # noqa: F401
from swaggeraemosgi.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from swaggeraemosgi.model.com_adobe_granite_system_monitoring_impl_system_stats_m_bean_impl_properties import ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties
globals()['ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties'] = ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties
class ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplInfo(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'pid': (str,), # noqa: E501
'title': (str,), # noqa: E501
'description': (str,), # noqa: E501
'properties': (ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'pid': 'pid', # noqa: E501
'title': 'title', # noqa: E501
'description': 'description', # noqa: E501
'properties': 'properties', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplInfo - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
pid (str): [optional] # noqa: E501
title (str): [optional] # noqa: E501
description (str): [optional] # noqa: E501
properties (ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplProperties): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
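
# ---------------------------------------------------------------------------
# Hedged usage sketch (hand-written, not generator output; the pid/title
# values below are illustrative). `to_dict()` is assumed from ModelNormal, as
# in other openapi-generator Python clients.
if __name__ == '__main__':
    _info = ComAdobeGraniteSystemMonitoringImplSystemStatsMBeanImplInfo(
        pid='com.adobe.granite.system.monitoring.example',
        title='System Stats',
        description='All fields on this model are optional.')
    print(_info.to_dict())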
| [
"[email protected]"
] | |
e7eda5397bfd521186cf038a7a0de9700c42024a | 871d2a367e45164f21ecdbefe52bf442b563b33c | /tests/tests/correctness/EPLAnalytics/Streaming_Calculations/FFT/fft_cor_003/run.py | 9d18dc0589837f3b625b0e287ef5aa58bf669523 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | SoftwareAG/apama-industry-analytics-kit | c0f6c30badf31411a29bc6daa4a7125b76f4e737 | a3f6039915501d41251b6f7ec41b0cb8111baf7b | refs/heads/master | 2022-02-19T20:47:27.180233 | 2022-02-02T12:58:23 | 2022-02-02T12:58:23 | 185,572,282 | 3 | 2 | Apache-2.0 | 2022-02-02T12:58:24 | 2019-05-08T09:14:07 | Python | UTF-8 | Python | false | false | 2,472 | py | # $Copyright (c) 2015 Software AG, Darmstadt, Germany and/or Software AG USA Inc., Reston, VA, USA, and/or Terracotta Inc., San Francisco, CA, USA, and/or Software AG (Canada) Inc., Cambridge, Ontario, Canada, and/or, Software AG (UK) Ltd., Derby, United Kingdom, and/or Software A.G. (Israel) Ltd., Or-Yehuda, Israel and/or their licensors.$
# Use, reproduction, transfer, publication or disclosure is prohibited except as specifically provided for in your License Agreement with Software AG
from industry.framework.AnalyticsBaseTest import AnalyticsBaseTest
from pysys.constants import *
class PySysTest(AnalyticsBaseTest):
def execute(self):
# Start the correlator
correlator = self.startTest()
self.injectAnalytic(correlator)
self.injectFFTAnalysis(correlator)
self.ready(correlator)
correlator.injectMonitorscript(['test.mon'], self.input)
self.waitForSignal('correlator.out', expr='TEST COMPLETE', condition='==1', timeout=5)
def validate(self):
# Basic sanity checks
self.checkSanity()
# Ensure the test output was correct
exprList=[]
exprList.append('FAILED TO CREATE ANALYTIC: 1')
exprList.append('TEST PASSED: 2')
exprList.append('FAILED TO CREATE ANALYTIC: 3')
exprList.append('FAILED TO CREATE ANALYTIC: 4')
exprList.append('FAILED TO CREATE ANALYTIC: 5')
exprList.append('FAILED TO CREATE ANALYTIC: 6')
exprList.append('FAILED TO CREATE ANALYTIC: 7')
exprList.append('FAILED TO CREATE ANALYTIC: 8')
exprList.append('FAILED TO CREATE ANALYTIC: 9')
exprList.append('FAILED TO CREATE ANALYTIC: 10')
exprList.append('FAILED TO CREATE ANALYTIC: 11')
exprList.append('FAILED TO CREATE ANALYTIC: 12')
exprList.append('FAILED TO CREATE ANALYTIC: 13')
exprList.append('FAILED TO CREATE ANALYTIC: 14')
exprList.append('FAILED TO CREATE ANALYTIC: 15')
exprList.append('FAILED TO CREATE ANALYTIC: 16')
exprList.append('FAILED TO CREATE ANALYTIC: 17')
exprList.append('FAILED TO CREATE ANALYTIC: 18')
exprList.append('FAILED TO CREATE ANALYTIC: 19')
exprList.append('FAILED TO CREATE ANALYTIC: 20')
exprList.append('TEST PASSED: 21')
self.assertOrderedGrep("correlator.out", exprList=exprList)
# Make sure that the we got the right number of actions/listeners called
self.assertLineCount('correlator.out', expr='TEST PASSED', condition='==2')
self.assertLineCount('correlator.out', expr='FAILED TO CREATE ANALYTIC:', condition='==19')
| [
"[email protected]"
] | |
a25599fcc363658ae14985fb1168f14a33ecb67e | ef7a5e1445706482a0e20d2632f6cd3d0e279031 | /amy/extrequests/migrations/0026_auto_20201107_1428.py | f1264cda61cc04a509b387aff45fb9e84eeac2d9 | [
"MIT"
] | permissive | pbanaszkiewicz/amy | 7bf054463f4ecfa217cc9e52a7927d22d32bcd84 | f97631b2f3dd8e8f502e90bdb04dd72f048d4837 | refs/heads/develop | 2022-11-17T18:56:18.975192 | 2022-11-03T23:19:41 | 2022-11-03T23:19:41 | 28,005,098 | 0 | 3 | MIT | 2018-03-20T18:48:55 | 2014-12-14T19:25:22 | Python | UTF-8 | Python | false | false | 714 | py | # Generated by Django 2.2.13 on 2020-11-07 14:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('extrequests', '0025_auto_20201105_1949'),
]
operations = [
migrations.AddField(
model_name='selforganisedsubmission',
name='end',
field=models.DateField(null=True, verbose_name='Workshop end date'),
),
migrations.AddField(
model_name='selforganisedsubmission',
name='start',
field=models.DateField(help_text='Please provide the dates that your Self-Organised workshop will run.', null=True, verbose_name='Workshop start date'),
),
]
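# Hedged usage note: apply with the standard management command, e.g.
#   python manage.py migrate extrequests 0026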
| [
"[email protected]"
] | |
c298137ca5f8ba3d23d361dc3cc858f6eb4f2f2e | 15a0797f087a9c05b7a679f47fefeeb875affab5 | /fermipy/validate/utils.py | af79b178f1eb202b6ff272a7d3fa1304526c98b8 | [
"BSD-3-Clause"
] | permissive | XanAstia/fermipy | 2496a6a07980faff20958f1a20ad1a3171bf7b35 | 8d9995934fd44959d51ad7bdcd2981b3694fa35e | refs/heads/master | 2021-01-05T20:03:15.590334 | 2020-07-22T12:35:18 | 2020-07-22T12:35:18 | 257,225,629 | 0 | 0 | BSD-3-Clause | 2020-06-24T13:45:52 | 2020-04-20T09:00:16 | Python | UTF-8 | Python | false | false | 4,675 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import copy
import re
import yaml
import sys
import mimetypes
import tempfile
import string
import random
from os.path import splitext, basename
import xml.etree.cElementTree as ElementTree
import argparse
import numpy as np
def rand_str(size=7):
chars = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(chars) for x in range(size))
def replace_aliases(cut_dict, aliases):
"""Substitute aliases in a cut dictionary."""
for k, v in cut_dict.items():
for k0, v0 in aliases.items():
cut_dict[k] = cut_dict[k].replace(k0, '(%s)' % v0)
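
# For example (hypothetical cut names):
#   replace_aliases({'sel': 'GoodEvt && E > 100'}, {'GoodEvt': 'Quality == 1'})
# rewrites the cut in place to '(Quality == 1) && E > 100'.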
def strip(input_str):
"""Strip newlines and whitespace from a string."""
return str(input_str.replace('\n', '').replace(' ', ''))
def get_files(files, extnames=['.root']):
"""Extract a list of file paths from a list containing both paths
and file lists with one path per line."""
files_out = []
for f in files:
mime = mimetypes.guess_type(f)
if os.path.splitext(f)[1] in extnames:
files_out += [f]
elif mime[0] == 'text/plain':
files_out += list(np.loadtxt(f, unpack=True, dtype='str'))
else:
raise Exception('Unrecognized input type.')
return files_out
def load_chain(chain, files, nfiles=None):
if isinstance(nfiles, list) and len(nfiles) == 1:
files = files[:nfiles[0]]
elif isinstance(nfiles, list) and len(nfiles) >= 2:
files = files[nfiles[0]:nfiles[1]]
elif nfiles is not None:
files = files[:nfiles]
print("Loading %i files..." % len(files))
for f in files:
chain.Add(f)
return chain
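
# Hedged usage sketch (requires PyROOT; the tree name "MeritTuple" is an
# illustrative placeholder, not something this module mandates):
#
#   import ROOT
#   chain = load_chain(ROOT.TChain("MeritTuple"), get_files(["files.txt"]),
#                      nfiles=10)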
def load_aliases(alias_files):
aliases = {}
for f in alias_files:
if f.endswith('.xml'):
aliases.update(get_cuts_from_xml(f))
elif f.endswith('.yaml'):
            with open(f, 'r') as fh:
                aliases.update(yaml.safe_load(fh))
else:
raise Exception('Invalid file type for aliases option.')
return aliases
def get_cuts_from_xml(xmlfile):
"""Extract event selection strings from the XML file."""
root = ElementTree.ElementTree(file=xmlfile).getroot()
event_maps = root.findall('EventMap')
alias_maps = root.findall('AliasDict')[0]
event_classes = {}
event_types = {}
event_aliases = {}
for m in event_maps:
if m.attrib['altName'] == 'EVENT_CLASS':
for c in m.findall('EventCategory'):
event_classes[c.attrib['name']] = strip(
c.find('ShortCut').text)
elif m.attrib['altName'] == 'EVENT_TYPE':
for c in m.findall('EventCategory'):
event_types[c.attrib['name']] = strip(c.find('ShortCut').text)
for m in alias_maps.findall('Alias'):
event_aliases[m.attrib['name']] = strip(m.text)
replace_aliases(event_aliases, event_aliases.copy())
replace_aliases(event_aliases, event_aliases.copy())
replace_aliases(event_classes, event_aliases)
replace_aliases(event_types, event_aliases)
event_selections = {}
event_selections.update(event_classes)
event_selections.update(event_types)
event_selections.update(event_aliases)
return event_selections
def set_event_list(tree, selection=None, fraction=None, start_fraction=None):
"""
Set the event list for a tree or chain.
Parameters
----------
tree : `ROOT.TTree`
Input tree/chain.
selection : str
Cut string defining the event list.
fraction : float
Fraction of the total file to include in the event list
starting from the *end* of the file.
"""
import ROOT
elist = rand_str()
if selection is None:
cuts = ''
else:
cuts = selection
if fraction is None or fraction >= 1.0:
n = tree.Draw(">>%s" % elist, cuts, "goff")
tree.SetEventList(ROOT.gDirectory.Get(elist))
elif start_fraction is None:
nentries = int(tree.GetEntries())
first_entry = min(int((1.0 - fraction) * nentries), nentries)
n = tree.Draw(">>%s" % elist, cuts, "goff", nentries, first_entry)
tree.SetEventList(ROOT.gDirectory.Get(elist))
else:
nentries = int(tree.GetEntries())
first_entry = min(int(start_fraction * nentries), nentries)
n = first_entry + int(nentries * fraction)
n = tree.Draw(">>%s" % elist, cuts, "goff",
n - first_entry, first_entry)
tree.SetEventList(ROOT.gDirectory.Get(elist))
return n
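
# Hedged usage sketch (requires PyROOT; file and cut names are placeholders):
#
#   import ROOT
#   tree = ROOT.TChain("MeritTuple")
#   tree.Add("merit.root")
#   n_selected = set_event_list(tree, selection="EvtRun > 0", fraction=0.5)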
| [
"[email protected]"
] | |
26dcfc08a00b7aeb3c786eddbad0189fcb96d23a | 21b0b4c27193898207751c91b8b2ed168a1b1638 | /py/py_0637_flexible_digit_sum.py | 1f93090be6969f91e0df5a37180db7d4318b6121 | [
"MIT"
] | permissive | lcsm29/project-euler | 67560a4e66968f1671a3d7ecf2dda6c956893dca | fab794ece5aa7a11fc7c2177f26250f40a5b1447 | refs/heads/main | 2023-07-04T11:45:24.374841 | 2021-08-07T08:20:41 | 2021-08-07T08:20:41 | 371,808,781 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 984 | py | # Solution of;
# Project Euler Problem 637: Flexible digit sum
# https://projecteuler.net/problem=637
#
# Given any positive integer $n$, we can construct a new integer by inserting
# plus signs between some of the digits of the base $B$ representation of $n$,
# and then carrying out the additions. For example, from $n=123_{10}$ ($n$ in
# base 10) we can construct the four base 10 integers $123_{10}$,
# $1+23=24_{10}$, $12+3=15_{10}$ and $1+2+3=6_{10}$Let $f(n,B)$ be the
# smallest number of steps needed to arrive at a single-digit number in base
# $B$. For example, $f(7,10)=0$ and $f(123,10)=1$. Let $g(n,B_1,B_2)$ be the
# sum of the positive integers $i$ not exceeding $n$ such that
# $f(i,B_1)=f(i,B_2)$. You are given $g(100,10,3)=3302$. Find $g(10^7,10,3)$
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 637
timed.caller(dummy, n, i, prob_id)
| [
"[email protected]"
] | |
92e3cdf8225d45ea6513de9fe7fb005957dc43f2 | dc2682f687a203dcf5f4f4260f857ef5099bbdab | /src/bootstrapping_olympics/interfaces/rep_nuisance_causal.py | fbe394ac400c52c679a98d561a9b9c3e359c92b9 | [] | no_license | AndreaCensi/boot_olympics | 1bc3d0cd887ca6b47a159929b53032c298979450 | dc05e283bde01cafc4843d82f17413b13c6ce1af | refs/heads/master | 2020-07-08T10:49:37.368104 | 2013-07-19T07:00:22 | 2013-07-19T07:00:22 | 2,098,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 800 | py | from .boot_spec import BootSpec
from abc import abstractmethod
from blocks import SimpleBlackBox
from contracts import ContractsMeta, contract
__all__ = ['RepresentationNuisanceCausal']
class RepresentationNuisanceCausal(object):
    '''
        A representation nuisance that acts causally: it wraps the system
        with a "pre" black box and a "post" black box, and transforms the
        corresponding BootSpec.
    '''
__metaclass__ = ContractsMeta
class NotInvertible(Exception):
pass
def inverse(self):
'''
Returns the inverse representation nuisance,
or raises NotInvertible
'''
@contract(spec=BootSpec, returns=BootSpec)
def transform_spec(self, spec):
        '''
            Returns the BootSpec obtained once this nuisance is applied
            to the given spec.
        '''
@abstractmethod
@contract(returns=SimpleBlackBox)
def get_pre(self):
pass
@abstractmethod
@contract(returns=SimpleBlackBox)
def get_post(self):
pass
| [
"[email protected]"
] | |
dff763da53b7be13b548bd30531adeb22a32193d | bb005bbd0e71d968beb2fc7d7bd88b0cd70def1c | /pytype/mixin.py | d3a9bd998cb15a79e1fd8905c1e4fd5c98f7fb21 | [
"Apache-2.0",
"MIT"
] | permissive | sawravchy/pytype | eec072afd261b6c7ab6502699c56c13bd6e529fa | 284d0f0edb3c60cf02367645bf8a8d055ca50fe9 | refs/heads/master | 2022-10-12T13:30:32.607779 | 2020-06-12T23:59:49 | 2020-06-12T23:59:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,732 | py | """Mixins for abstract.py."""
import logging
from pytype import abstract_utils
from pytype import datatypes
from pytype import function
from pytype.pytd import mro
from pytype.pytd import pytd
import six
log = logging.getLogger(__name__)
class MixinMeta(type):
"""Metaclass for mix-ins."""
def __init__(cls, name, superclasses, *args, **kwargs):
super(MixinMeta, cls).__init__(name, superclasses, *args, **kwargs)
for sup in superclasses:
if hasattr(sup, "overloads"):
for method in sup.overloads:
if method not in cls.__dict__:
setattr(cls, method, getattr(sup, method))
# Record the fact that we have set a method on the class, to do
# superclass lookups.
if "__mixin_overloads__" in cls.__dict__:
cls.__mixin_overloads__[method] = sup
else:
setattr(cls, "__mixin_overloads__", {method: sup})
def super(cls, method):
"""Imitate super() in a mix-in.
This method is a substitute for
super(MixinClass, self).overloaded_method(arg),
which we can't use because mix-ins appear at the end of the MRO. It should
be called as
MixinClass.super(self.overloaded_method)(arg)
. It works by finding the class on which MixinMeta.__init__ set
MixinClass.overloaded_method and calling super() on that class.
Args:
method: The method in the mix-in.
Returns:
The method overloaded by 'method'.
"""
for supercls in type(method.__self__).__mro__:
# Fetch from __dict__ rather than using getattr() because we only want
# to consider methods defined on supercls itself (not on a parent).
if ("__mixin_overloads__" in supercls.__dict__ and
supercls.__mixin_overloads__.get(method.__name__) is cls):
method_cls = supercls
break
return getattr(super(method_cls, method.__self__), method.__name__)
@six.add_metaclass(MixinMeta)
class PythonConstant(object):
"""A mix-in for storing actual Python constants, not just their types.
This is used for things that are stored in cfg.Variable, but where we
may need the actual data in order to proceed later. E.g. function / class
definitions, tuples. Also, potentially: Small integers, strings (E.g. "w",
"r" etc.).
"""
overloads = ("__repr__",)
def init_mixin(self, pyval):
"""Mix-in equivalent of __init__."""
self.pyval = pyval
def str_of_constant(self, printer):
"""Get a string representation of this constant.
Args:
printer: An AtomicAbstractValue -> str function that will be used to
print abstract values.
Returns:
A string of self.pyval.
"""
del printer
return repr(self.pyval)
def __repr__(self):
return "<%s %r>" % (self.name, self.str_of_constant(str))
@six.add_metaclass(MixinMeta)
class HasSlots(object):
"""Mix-in for overriding slots with custom methods.
This makes it easier to emulate built-in classes like dict which need special
handling of some magic methods (__setitem__ etc.)
"""
overloads = ("get_special_attribute",)
def init_mixin(self):
self._slots = {}
self._super = {}
self._function_cache = {}
def make_native_function(self, name, method):
key = (name, method)
if key not in self._function_cache:
self._function_cache[key] = self.vm.make_native_function(name, method)
return self._function_cache[key]
def set_slot(self, name, method):
"""Add a new slot to this value."""
assert name not in self._slots, "slot %s already occupied" % name
_, attr = self.vm.attribute_handler.get_attribute(
self.vm.root_cfg_node, self, name,
self.to_binding(self.vm.root_cfg_node))
self._super[name] = attr
f = self.make_native_function(name, method)
self._slots[name] = f.to_variable(self.vm.root_cfg_node)
def call_pytd(self, node, name, *args):
"""Call the (original) pytd version of a method we overwrote."""
return self.vm.call_function(node, self._super[name], function.Args(args),
fallback_to_unsolvable=False)
def get_special_attribute(self, node, name, valself):
if name in self._slots:
attr = self.vm.program.NewVariable()
additional_sources = {valself} if valself else None
attr.PasteVariable(self._slots[name], node, additional_sources)
return attr
return HasSlots.super(self.get_special_attribute)(node, name, valself)
@six.add_metaclass(MixinMeta)
class Class(object):
"""Mix-in to mark all class-like values."""
overloads = ("get_special_attribute", "get_own_new", "call", "compute_mro")
def __new__(cls, *unused_args, **unused_kwds):
"""Prevent direct instantiation."""
assert cls is not Class, "Cannot instantiate Class"
return object.__new__(cls)
def init_mixin(self, metaclass):
"""Mix-in equivalent of __init__."""
if metaclass is None:
self.cls = self._get_inherited_metaclass()
else:
# TODO(rechen): Check that the metaclass is a (non-strict) subclass of the
# metaclasses of the base classes.
self.cls = metaclass
# Key-value store of metadata for overlays to use.
self.metadata = {}
self._instance_cache = {}
self._init_abstract_methods()
self._init_protocol_methods()
self._init_overrides_bool()
self._all_formal_type_parameters = datatypes.AliasingMonitorDict()
self._all_formal_type_parameters_loaded = False
def bases(self):
return []
@property
def all_formal_type_parameters(self):
self._load_all_formal_type_parameters()
return self._all_formal_type_parameters
def _load_all_formal_type_parameters(self):
"""Load _all_formal_type_parameters."""
if self._all_formal_type_parameters_loaded:
return
bases = [
abstract_utils.get_atomic_value(
base, default=self.vm.convert.unsolvable) for base in self.bases()]
for base in bases:
abstract_utils.parse_formal_type_parameters(
base, self.full_name, self._all_formal_type_parameters)
self._all_formal_type_parameters_loaded = True
def get_own_methods(self):
"""Get the methods defined by this class."""
raise NotImplementedError(self.__class__.__name__)
def _is_protocol(self):
"""Whether this class is a protocol."""
if self.isinstance_PyTDClass():
for parent in self.pytd_cls.parents:
if isinstance(
parent, pytd.ClassType) and parent.name == "typing.Protocol":
return True
elif self.isinstance_InterpreterClass():
for parent_var in self._bases:
for parent in parent_var.data:
if (parent.isinstance_PyTDClass() and
parent.full_name == "typing.Protocol"):
return True
return False
def _init_protocol_methods(self):
"""Compute this class's protocol methods."""
if self.isinstance_ParameterizedClass():
self.protocol_methods = self.base_cls.protocol_methods
return
if not self._is_protocol():
self.protocol_methods = set()
return
if self.isinstance_PyTDClass() and self.pytd_cls.name.startswith("typing."):
# In typing.pytd, we've experimentally marked some classes such as
# Sequence, which contains a mix of abstract and non-abstract methods, as
# protocols, with only the abstract methods being required.
self.protocol_methods = self.abstract_methods
return
# For the algorithm to run, protocol_methods needs to be populated with the
# protocol methods defined by this class. We'll overwrite the attribute
# with the full set of protocol methods later.
self.protocol_methods = self.get_own_methods()
protocol_methods = set()
for cls in reversed(self.mro):
if not isinstance(cls, Class):
continue
if cls.is_protocol:
# Add protocol methods defined by this class.
protocol_methods |= {m for m in cls.protocol_methods if m in cls}
else:
# Remove methods implemented by this class.
protocol_methods = {m for m in protocol_methods if m not in cls}
self.protocol_methods = protocol_methods
def _init_overrides_bool(self):
"""Compute and cache whether the class sets its own boolean value."""
# A class's instances can evaluate to False if it defines __bool__ or
# __len__. Python2 used __nonzero__ rather than __bool__.
bool_override = "__bool__" if self.vm.PY3 else "__nonzero__"
if self.isinstance_ParameterizedClass():
self.overrides_bool = self.base_cls.overrides_bool
return
for cls in self.mro:
if isinstance(cls, Class):
if any(x in cls.get_own_methods() for x in (bool_override, "__len__")):
self.overrides_bool = True
return
self.overrides_bool = False
def get_own_abstract_methods(self):
"""Get the abstract methods defined by this class."""
raise NotImplementedError(self.__class__.__name__)
def _init_abstract_methods(self):
"""Compute this class's abstract methods."""
# For the algorithm to run, abstract_methods needs to be populated with the
# abstract methods defined by this class. We'll overwrite the attribute
# with the full set of abstract methods later.
self.abstract_methods = self.get_own_abstract_methods()
abstract_methods = set()
for cls in reversed(self.mro):
if not isinstance(cls, Class):
continue
# Remove methods implemented by this class.
abstract_methods = {m for m in abstract_methods
if m not in cls or m in cls.abstract_methods}
# Add abstract methods defined by this class.
abstract_methods |= {m for m in cls.abstract_methods if m in cls}
self.abstract_methods = abstract_methods
@property
def is_abstract(self):
has_abstract_metaclass = self.cls and any(
parent.full_name == "abc.ABCMeta" for parent in self.cls.mro)
return has_abstract_metaclass and bool(self.abstract_methods)
@property
def is_test_class(self):
return any(base.full_name in ("unittest.TestCase", "unittest.case.TestCase")
for base in self.mro)
@property
def is_protocol(self):
return bool(self.protocol_methods)
def _get_inherited_metaclass(self):
for base in self.mro[1:]:
if isinstance(base, Class) and base.cls is not None:
return base.cls
return None
def call_metaclass_init(self, node):
"""Call the metaclass's __init__ method if it does anything interesting."""
if not self.cls:
return node
node, init = self.vm.attribute_handler.get_attribute(
node, self.cls, "__init__")
if not init or not any(
f.isinstance_InterpreterFunction() for f in init.data):
# Only an InterpreterFunction has interesting side effects.
return node
# TODO(rechen): The signature is (cls, name, bases, dict); should we fill in
# the last three args more precisely?
args = function.Args(posargs=(self.to_variable(node),) + tuple(
self.vm.new_unsolvable(node) for _ in range(3)))
log.debug("Calling __init__ on metaclass %s of class %s",
self.cls.name, self.name)
node, _ = self.vm.call_function(node, init, args)
return node
def get_own_new(self, node, value):
"""Get this value's __new__ method, if it isn't object.__new__.
Args:
node: The current node.
value: A cfg.Binding containing this value.
Returns:
A tuple of (1) a node and (2) either a cfg.Variable of the special
__new__ method, or None.
"""
node, new = self.vm.attribute_handler.get_attribute(
node, value.data, "__new__")
if new is None:
return node, None
if len(new.bindings) == 1:
f = new.bindings[0].data
if (f.isinstance_AMBIGUOUS_OR_EMPTY() or
self.vm.convert.object_type.is_object_new(f)):
# Instead of calling object.__new__, our abstract classes directly
# create instances of themselves.
return node, None
return node, new
def _call_new_and_init(self, node, value, args):
"""Call __new__ if it has been overridden on the given value."""
node, new = self.get_own_new(node, value)
if new is None:
return node, None
cls = value.AssignToNewVariable(node)
new_args = args.replace(posargs=(cls,) + args.posargs)
node, variable = self.vm.call_function(node, new, new_args)
for val in variable.bindings:
# If val.data is a class, _call_init mistakenly calls val.data's __init__
# method rather than that of val.data.cls.
if not isinstance(val.data, Class) and self == val.data.cls:
node = self._call_init(node, val, args)
return node, variable
def _call_method(self, node, value, method_name, args):
node, method = self.vm.attribute_handler.get_attribute(
node, value.data, method_name, value)
if method:
call_repr = "%s.%s(..._)" % (self.name, method_name)
log.debug("calling %s", call_repr)
node, ret = self.vm.call_function(node, method, args)
log.debug("%s returned %r", call_repr, ret)
return node
def _call_init(self, node, value, args):
node = self._call_method(node, value, "__init__", args)
# Test classes initialize attributes in setUp() as well.
if self.is_test_class:
node = self._call_method(node, value, "setUp", function.Args(()))
return node
def _new_instance(self):
# We allow only one "instance" per code location, regardless of call stack.
key = self.vm.frame.current_opcode
assert key
if key not in self._instance_cache:
self._instance_cache[key] = self._to_instance()
return self._instance_cache[key]
def call(self, node, value, args):
if self.is_abstract:
self.vm.errorlog.not_instantiable(self.vm.frames, self)
node, variable = self._call_new_and_init(node, value, args)
if variable is None:
value = self._new_instance()
variable = self.vm.program.NewVariable()
val = variable.AddBinding(value, [], node)
node = self._call_init(node, val, args)
return node, variable
def get_special_attribute(self, node, name, valself):
"""Fetch a special attribute."""
if name == "__getitem__" and valself is None:
# See vm._call_binop_on_bindings: valself == None is a special value that
# indicates an annotation.
if self.cls:
# This class has a custom metaclass; check if it defines __getitem__.
_, attr = self.vm.attribute_handler.get_attribute(
node, self, name, self.to_binding(node))
if attr:
return attr
# Treat this class as a parameterized container in an annotation. We do
# not need to worry about the class not being a container: in that case,
# AnnotationContainer's param length check reports an appropriate error.
container = self.to_annotation_container()
return container.get_special_attribute(node, name, valself)
return Class.super(self.get_special_attribute)(node, name, valself)
def has_dynamic_attributes(self):
return any(a in self for a in abstract_utils.DYNAMIC_ATTRIBUTE_MARKERS)
def compute_is_dynamic(self):
# This needs to be called after self.mro is set.
return any(c.has_dynamic_attributes()
for c in self.mro
if isinstance(c, Class))
def compute_mro(self):
"""Compute the class precedence list (mro) according to C3."""
bases = abstract_utils.get_mro_bases(self.bases(), self.vm)
bases = [[self]] + [list(base.mro) for base in bases] + [list(bases)]
# If base classes are `ParameterizedClass`, we will use their `base_cls` to
    # calculate the MRO. Because of type parameter renaming, we cannot compare
# the `ParameterizedClass`s which contain the same `base_cls`. See example:
# class A(Iterator[T]): ...
# class B(Iterator[U], A[V]): ...
# The inheritance: [B], [Iterator, ...], [A, Iterator, ...], [Iterator, A]
# So this has MRO order issue, but because the template names of
# `ParameterizedClass` of `Iterator` are different, they will be treated as
# different base classes and it will infer the MRO order is correct.
# TODO(ahxun): fix this by solving the template rename problem
base2cls = {}
newbases = []
for row in bases:
baselist = []
for base in row:
if base.isinstance_ParameterizedClass():
base2cls[base.base_cls] = base
baselist.append(base.base_cls)
else:
base2cls[base] = base
baselist.append(base)
newbases.append(baselist)
# calc MRO and replace them with original base classes
return tuple(base2cls[base] for base in mro.MROMerge(newbases))
| [
"[email protected]"
] | |
b2c96c93f8929908a4a3a0d19dc92b0814c5c748 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /src/transformers/models/gptj/modeling_tf_gptj.py | f215adaaac005501e99f26d42bef5e99b732eac3 | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 43,937 | py | # coding=utf-8
# Copyright 2022 The EleutherAI and HuggingFace Teams. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 GPT-J model."""
from __future__ import annotations
from typing import Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutputWithPast,
TFCausalLMOutputWithPast,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutputWithPast,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFModelInputType,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSharedEmbeddings,
get_initializer,
keras_serializable,
unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import logging
from .configuration_gptj import GPTJConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "EleutherAI/gpt-j-6B"
_CONFIG_FOR_DOC = "GPTJConfig"
GPTJ_PRETRAINED_MODEL_ARCHIVE_LIST = [
"EleutherAI/gpt-j-6B",
# See all GPT-J models at https://huggingface.co/models?filter=gptj
]
def create_sinusoidal_positions(num_pos: int, dim: int) -> tf.Tensor:
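    # Builds a (num_pos, dim) table laid out as [sin | cos] halves; `dim` is
    # assumed even here (rotary_dim / head dims are even in practice).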
inv_freq = tf.cast(1.0 / (10000 ** (tf.range(0, dim, 2) / dim)), tf.float32)
sinusoid_inp = tf.cast(tf.einsum("i , j -> i j", tf.range(num_pos, dtype=tf.float32), inv_freq), tf.float32)
sin, cos = tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)
out = tf.concat((sin, cos), axis=1)
return out
def rotate_every_two(x: tf.Tensor) -> tf.Tensor:
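    # Pairwise "rotate by 90 degrees": [x0, x1, x2, x3, ...] -> [-x1, x0, -x3, x2, ...]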
rotate_half_tensor = tf.stack((-x[:, :, :, 1::2], x[:, :, :, ::2]), axis=-1)
new_shape = shape_list(rotate_half_tensor)[:-2] + [tf.math.reduce_prod(shape_list(rotate_half_tensor)[-2:])]
rotate_half_tensor = tf.reshape(rotate_half_tensor, new_shape)
return rotate_half_tensor
def apply_rotary_pos_emb(tensor: tf.Tensor, sincos: tf.Tensor) -> tf.Tensor:
sin_pos, cos_pos = sincos
sin_pos = tf.repeat(sin_pos[:, :, None, :], 2, 3)
cos_pos = tf.repeat(cos_pos[:, :, None, :], 2, 3)
return (tensor * cos_pos) + (rotate_every_two(tensor) * sin_pos)
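
# Hedged sanity sketch (illustrative, not part of the model): each (sin, cos)
# pair acts as a 2-D rotation, so applying the rotary embedding should
# preserve the per-position feature norm.
if __name__ == "__main__":
    _x = tf.random.normal((1, 4, 2, 8))  # (batch, seq, heads, head_dim)
    _sincos = tf.split(create_sinusoidal_positions(4, 8)[None, :, :], 2, axis=-1)
    _rot = apply_rotary_pos_emb(_x, _sincos)
    tf.debugging.assert_near(tf.norm(_x, axis=-1), tf.norm(_rot, axis=-1), atol=1e-4)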
class TFGPTJAttention(tf.keras.layers.Layer):
def __init__(self, config: GPTJConfig, **kwargs):
super().__init__(**kwargs)
self.embed_dim = config.hidden_size
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.embed_dim // self.num_attention_heads
if self.head_dim * self.num_attention_heads != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_attention_heads (got `embed_dim`: {self.embed_dim} and"
f" `num_attention_heads`: {self.num_attention_heads})."
)
self.scale_attn = self.head_dim**0.5
self.rotary_dim = config.rotary_dim
self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
self.q_proj = tf.keras.layers.Dense(
self.embed_dim,
use_bias=False,
kernel_initializer=get_initializer(config.initializer_range),
name="q_proj",
)
self.k_proj = tf.keras.layers.Dense(
self.embed_dim,
use_bias=False,
kernel_initializer=get_initializer(config.initializer_range),
name="k_proj",
)
self.v_proj = tf.keras.layers.Dense(
self.embed_dim,
use_bias=False,
kernel_initializer=get_initializer(config.initializer_range),
name="v_proj",
)
self.out_proj = tf.keras.layers.Dense(
self.embed_dim,
use_bias=False,
kernel_initializer=get_initializer(config.initializer_range),
name="out_proj",
)
self.max_positions = config.max_position_embeddings
self.lower_triangle_mask = tf.reshape(
tf.cast(tf.experimental.numpy.tril(tf.ones((self.max_positions, self.max_positions))), tf.int8),
(1, 1, self.max_positions, self.max_positions),
)
pos_embd_dim = self.rotary_dim or self.embed_dim
self.embed_positions = create_sinusoidal_positions(self.max_positions, pos_embd_dim)
def get_causal_mask(self, key_length, query_length) -> tf.Tensor:
return tf.cast(self.lower_triangle_mask[:, :, key_length - query_length : key_length, :key_length], tf.bool)
@staticmethod
def get_masked_bias(dtype: tf.DType) -> tf.Tensor:
return tf.cast(tf.constant(-1e9), dtype)
def _split_heads(self, hidden_states: tf.Tensor, rotary: bool) -> tf.Tensor:
"""
Splits hidden dim into attn_head_size and num_attention_heads
"""
new_shape = shape_list(hidden_states)[:-1] + [self.num_attention_heads, self.head_dim]
hidden_states = tf.reshape(hidden_states, new_shape)
if rotary:
return hidden_states
if len(shape_list(hidden_states)) == 4:
return tf.transpose(hidden_states, (0, 2, 1, 3)) # (batch, head, seq_length, head_features)
if len(shape_list(hidden_states)) == 5:
return tf.transpose(hidden_states, (0, 1, 3, 2, 4)) # (batch, blocks, head, block_length, head_features)
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
def _merge_heads(self, hidden_states: tf.Tensor) -> tf.Tensor:
"""
Merges attn_head_size dim and num_attn_heads dim into hidden dim
"""
if len(shape_list(hidden_states)) == 4:
hidden_states = tf.transpose(hidden_states, (0, 2, 1, 3))
elif len(shape_list(hidden_states)) == 5:
hidden_states = tf.transpose(hidden_states, (0, 1, 3, 2, 4))
else:
raise ValueError(f"Input tensor rank should be one of [4, 5], but is: {len(shape_list(hidden_states))}")
new_shape = shape_list(hidden_states)[:-2] + [self.num_attention_heads * self.head_dim]
return tf.reshape(hidden_states, new_shape)
def _attn(
self,
query: tf.Tensor,
key: tf.Tensor,
value: tf.Tensor,
attention_mask: tf.Tensor | None = None,
head_mask: tf.Tensor | None = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
# compute causal mask from causal mask buffer
query_length, key_length = shape_list(query)[-2], shape_list(key)[-2]
causal_mask = self.get_causal_mask(key_length, query_length)
# Keep the attention weights computation in fp32 to avoid overflow issues
query = tf.cast(query, tf.float32)
key = tf.cast(key, tf.float32)
attn_weights = tf.matmul(query, key, transpose_b=True)
attn_weights = tf.where(causal_mask, attn_weights, self.get_masked_bias(attn_weights.dtype))
attn_weights = attn_weights / self.scale_attn
if attention_mask is not None:
# Apply the attention mask
attn_weights = attn_weights + attention_mask
attn_weights = stable_softmax(attn_weights, axis=-1)
attn_weights = tf.cast(attn_weights, value.dtype)
attn_weights = self.attn_dropout(attn_weights)
# Mask heads if we want to
if head_mask is not None:
attn_weights = attn_weights * head_mask
attn_output = tf.matmul(attn_weights, value)
return attn_output, attn_weights
def call(
self,
hidden_states: tf.Tensor,
layer_past: Optional[Tuple[tf.Tensor, tf.Tensor]] = None,
attention_mask: tf.Tensor | None = None,
position_ids: tf.Tensor | None = None,
head_mask: tf.Tensor | None = None,
use_cache: bool = False,
output_attentions: bool = False,
):
query = self.q_proj(hidden_states)
key = self.k_proj(hidden_states)
value = self.v_proj(hidden_states)
query = self._split_heads(query, True)
key = self._split_heads(key, True)
value = self._split_heads(value, False)
sincos = tf.cast(tf.gather(self.embed_positions, position_ids, axis=0), hidden_states.dtype)
sincos = tf.split(sincos, 2, axis=-1)
if self.rotary_dim is not None:
k_rot = key[:, :, :, : self.rotary_dim]
k_pass = key[:, :, :, self.rotary_dim :]
q_rot = query[:, :, :, : self.rotary_dim]
q_pass = query[:, :, :, self.rotary_dim :]
k_rot = apply_rotary_pos_emb(k_rot, sincos)
q_rot = apply_rotary_pos_emb(q_rot, sincos)
key = tf.concat((k_rot, k_pass), axis=-1)
query = tf.concat((q_rot, q_pass), axis=-1)
else:
key = apply_rotary_pos_emb(key, sincos)
query = apply_rotary_pos_emb(query, sincos)
key = tf.transpose(key, (0, 2, 1, 3))
query = tf.transpose(query, (0, 2, 1, 3))
if layer_past is not None:
past_key = layer_past[0]
past_value = layer_past[1]
key = tf.concat((past_key, key), axis=-2)
value = tf.concat((past_value, value), axis=-2)
if use_cache is True:
present = (key, value)
else:
present = None
# compute self-attention: V x Softmax(QK^T)
attn_output, attn_weights = self._attn(query, key, value, attention_mask, head_mask)
attn_output = self._merge_heads(attn_output)
attn_output = self.out_proj(attn_output)
attn_output = self.resid_dropout(attn_output)
outputs = (attn_output, present)
if output_attentions:
outputs += (attn_weights,)
return outputs # a, present, (attentions)
class TFGPTJMLP(tf.keras.layers.Layer):
def __init__(self, intermediate_size: int, config: GPTJConfig, **kwargs):
super().__init__(**kwargs)
embed_dim = config.n_embd
self.fc_in = tf.keras.layers.Dense(
intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="fc_in"
)
self.fc_out = tf.keras.layers.Dense(
embed_dim, kernel_initializer=get_initializer(config.initializer_range), name="fc_out"
)
self.act = get_tf_activation(config.activation_function)
self.dropout = tf.keras.layers.Dropout(config.embd_pdrop)
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.fc_in(hidden_states)
hidden_states = self.act(hidden_states)
hidden_states = self.fc_out(hidden_states)
hidden_states = self.dropout(hidden_states)
return hidden_states
class TFGPTJBlock(tf.keras.layers.Layer):
def __init__(self, config: GPTJConfig, **kwargs):
super().__init__(**kwargs)
inner_dim = config.n_inner if config.n_inner is not None else 4 * config.n_embd
self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1")
self.attn = TFGPTJAttention(config, name="attn")
self.mlp = TFGPTJMLP(inner_dim, config, name="mlp")
def call(
self,
hidden_states: tf.Tensor,
layer_past: tf.Tensor | None = None,
attention_mask: tf.Tensor | None = None,
position_ids: tf.Tensor | None = None,
head_mask: tf.Tensor | None = None,
use_cache: bool = False,
output_attentions: bool = False,
):
residual = hidden_states
hidden_states = self.ln_1(hidden_states)
attn_outputs = self.attn(
hidden_states=hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask,
use_cache=use_cache,
output_attentions=output_attentions,
) # attn_outputs: attn_output, present, (attentions)
attn_output = attn_outputs[0]
outputs = attn_outputs[1:]
feed_forward_hidden_states = self.mlp(hidden_states)
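        # Note (added): GPT-J uses a parallel residual; attention and MLP both
        # consume the same LayerNorm output and their results are summed with
        # the raw input, i.e. x + Attn(LN(x)) + MLP(LN(x)), rather than GPT-2's
        # two sequential residual sub-blocks.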
hidden_states = attn_output + feed_forward_hidden_states + residual
if use_cache:
outputs = (hidden_states,) + outputs
else:
outputs = (hidden_states,) + outputs[1:]
return outputs # hidden_states, present, (attentions)
@keras_serializable
class TFGPTJMainLayer(tf.keras.layers.Layer):
config_class = GPTJConfig
def __init__(self, config: GPTJConfig, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.config = config
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.use_cache = config.use_cache
self.return_dict = config.use_return_dict
self.num_hidden_layers = config.n_layer
self.n_embd = config.n_embd
self.n_positions = config.n_positions
self.initializer_range = config.initializer_range
self.wte = TFSharedEmbeddings(
config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte"
)
self.drop = tf.keras.layers.Dropout(config.embd_pdrop)
self.h = [TFGPTJBlock(config, name=f"h_._{i}") for i in range(config.n_layer)]
self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f")
def get_input_embeddings(self):
return self.wte
def set_input_embeddings(self, value: tf.Tensor):
self.wte.weight = value
self.wte.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
"""
raise NotImplementedError
@unpack_inputs
def call(
self,
input_ids=None,
past_key_values=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
if input_ids is not None and inputs_embeds is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif input_ids is not None:
input_shape = shape_list(input_ids)
input_ids = tf.reshape(input_ids, [-1, input_shape[-1]])
elif inputs_embeds is not None:
input_shape = shape_list(inputs_embeds)[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if past_key_values is None:
past_length = 0
past_key_values = [None] * len(self.h)
else:
past_length = shape_list(past_key_values[0][0])[-2]
if position_ids is None:
position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0)
if attention_mask is not None:
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
attention_mask_shape = shape_list(attention_mask)
attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
one_cst = tf.constant(1.0)
attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype)
attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0))
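            # For example (added note): a padding mask row [1, 1, 0] becomes the
            # additive bias [0.0, 0.0, -10000.0], broadcast over heads and query
            # positions, which effectively zeroes the padded key after softmax.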
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.num_hidden_layers
# head_mask = tf.constant([0] * self.num_hidden_layers)
position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]])
if inputs_embeds is None:
check_embeddings_within_bounds(input_ids, self.wte.vocab_size)
inputs_embeds = self.wte(input_ids, mode="embedding")
if token_type_ids is not None:
token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]])
token_type_embeds = self.wte(token_type_ids, mode="embedding")
else:
token_type_embeds = tf.constant(0.0)
token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype)
hidden_states = inputs_embeds + token_type_embeds
hidden_states = self.drop(hidden_states, training=training)
output_shape = input_shape + [shape_list(hidden_states)[-1]]
presents = () if use_cache else None
all_attentions = () if output_attentions else None
all_hidden_states = () if output_hidden_states else None
for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):
if output_hidden_states:
all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)
outputs = block(
hidden_states=hidden_states,
layer_past=layer_past,
attention_mask=attention_mask,
position_ids=position_ids,
head_mask=head_mask[i],
use_cache=use_cache,
output_attentions=output_attentions,
training=training,
)
hidden_states = outputs[0]
if use_cache:
presents = presents + (outputs[1],)
if output_attentions:
all_attentions = all_attentions + (outputs[2 if use_cache else 1],)
hidden_states = self.ln_f(hidden_states)
hidden_states = tf.reshape(hidden_states, output_shape)
# Add last hidden state
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if output_attentions:
# let the number of heads free (-1) so we can extract attention even after head pruning
attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)
if not return_dict:
return tuple(v for v in [hidden_states, presents, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=presents,
hidden_states=all_hidden_states,
attentions=all_attentions,
)
class TFGPTJPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = GPTJConfig
base_model_prefix = "transformer"
# names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias"]
GPTJ_START_DOCSTRING = r"""
This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
TensorFlow models and layers in `transformers` accept two formats as input:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional argument.
The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Note that when creating models and layers with
[subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
about any of this, as you can just pass inputs like you would to any other Python function!
</Tip>
Parameters:
config ([`GPTJConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
GPTJ_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`):
`input_ids_length` = `sequence_length` if `past` is `None` else `past[0].shape[-2]` (`sequence_length` of
input past key value states). Indices of input sequence tokens in the vocabulary.
If `past` is used, only input IDs that do not have their past calculated should be passed as `input_ids`.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
past_key_values (`List[tf.Tensor]` of length `config.n_layers`):
Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see
`past` output below). Can be used to speed up sequential decoding. The token ids which have their past
given to this model should not be passed as input ids as they have already been computed.
attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (`bool`, *optional*):
Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
in eager mode, in graph mode the value will always be set to True.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare GPT-J Model transformer outputting raw hidden-states without any specific head on top.",
GPTJ_START_DOCSTRING,
)
class TFGPTJModel(TFGPTJPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFGPTJMainLayer(config, name="transformer")
@unpack_inputs
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING)
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
head_mask: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFBaseModelOutputWithPast, Tuple[tf.Tensor]]:
r"""
use_cache (`bool`, *optional*, defaults to `True`):
If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
`past`). Set to `False` during training, `True` during generation
"""
outputs = self.transformer(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
return outputs
@add_start_docstrings(
"""
The GPT-J Model transformer with a language modeling head on top.
""",
GPTJ_START_DOCSTRING,
)
class TFGPTJForCausalLM(TFGPTJPreTrainedModel, TFCausalLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.transformer = TFGPTJMainLayer(config, name="transformer")
self.lm_head = tf.keras.layers.Dense(
config.vocab_size, kernel_initializer=get_initializer(config.initializer_range), name="lm_head"
)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs):
token_type_ids = kwargs.get("token_type_ids", None)
        # only keep the last token of input_ids if past is defined in kwargs
if past_key_values:
inputs = tf.expand_dims(inputs[:, -1], -1)
if token_type_ids is not None:
token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1)
position_ids = kwargs.get("position_ids", None)
attention_mask = kwargs.get("attention_mask", None)
if attention_mask is not None and position_ids is None:
position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True)
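            # e.g. (added note) a left-padded mask [0, 0, 1, 1] gives the
            # exclusive cumsum [0, 0, 0, 1], so real tokens are numbered from
            # the first unmasked position onwards.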
if past_key_values:
position_ids = tf.expand_dims(position_ids[:, -1], -1)
return {
"input_ids": inputs,
"attention_mask": attention_mask,
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"token_type_ids": token_type_ids,
}
@unpack_inputs
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFCausalLMOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
head_mask: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
labels: np.ndarray | tf.Tensor | None = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFCausalLMOutputWithPast, Tuple[tf.Tensor]]:
r"""
labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
"""
transformer_outputs = self.transformer(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
hidden_states = transformer_outputs[0]
lm_logits = self.lm_head(hidden_states)
loss = None
if labels is not None:
# shift labels to the left and cut last logit token
shifted_logits = lm_logits[:, :-1]
labels = labels[:, 1:]
loss = self.hf_compute_loss(labels, shifted_logits)
if not return_dict:
output = (lm_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutputWithPast(
loss=loss,
logits=lm_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The GPT-J Model transformer with a sequence classification head on top (linear layer).
[`GPTJForSequenceClassification`] uses the last token in order to do the classification, as other causal models
(e.g. GPT, GPT-2, GPT-Neo) do.
Since it does classification on the last token, it requires to know the position of the last token. If a
`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
each row of the batch).
""",
GPTJ_START_DOCSTRING,
)
class TFGPTJForSequenceClassification(TFGPTJPreTrainedModel, TFSequenceClassificationLoss):
_keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.transformer = TFGPTJMainLayer(config, name="transformer")
self.score = tf.keras.layers.Dense(
self.num_labels,
use_bias=False,
kernel_initializer=get_initializer(config.initializer_range),
name="score",
)
@unpack_inputs
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutputWithPast,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
head_mask: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
labels: np.ndarray | tf.Tensor | None = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]:
r"""
labels (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
transformer_outputs = self.transformer(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
hidden_states = transformer_outputs[0]
logits = self.score(hidden_states)
logits_shape = shape_list(logits)
in_logits = None
if self.config.pad_token_id is None:
sequence_lengths = -1
else:
if input_ids is not None:
sequence_lengths = (
tf.argmax(tf.cast(tf.math.equal(input_ids, self.config.pad_token_id), input_ids.dtype), axis=-1)
- 1
)
sequence_lengths = tf.where(sequence_lengths >= 0, sequence_lengths, input_ids.shape[-1] - 1)
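                # e.g. (added note) with pad_token_id = 0, a row [5, 6, 0, 0]
                # gives argmax(equal) = 2 and sequence_lengths = 1: the index
                # of the last non-padding token, whose logits are gathered below.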
in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1)
else:
sequence_lengths = -1
logger.warning(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
loss = None
if labels is not None:
if self.config.pad_token_id is None and logits_shape[0] != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if not tf.is_tensor(sequence_lengths):
in_logits = logits[0 : logits_shape[0], sequence_lengths]
loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels]))
pooled_logits = in_logits if in_logits is not None else logits
if not return_dict:
output = (pooled_logits,) + transformer_outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=transformer_outputs.past_key_values,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
@add_start_docstrings(
"""
The GPT-J Model transformer with a span classification head on top for extractive question-answering tasks like
SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
GPTJ_START_DOCSTRING,
)
class TFGPTJForQuestionAnswering(TFGPTJPreTrainedModel, TFQuestionAnsweringLoss):
_keys_to_ignore_on_load_missing = [r"h.\d+.attn.masked_bias", r"h.\d+.attn.bias", r"lm_head.weight"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.transformer = TFGPTJMainLayer(config, name="transformer")
self.qa_outputs = tf.keras.layers.Dense(
self.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@unpack_inputs
@add_start_docstrings_to_model_forward(GPTJ_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: TFModelInputType | None = None,
past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
attention_mask: np.ndarray | tf.Tensor | None = None,
token_type_ids: np.ndarray | tf.Tensor | None = None,
position_ids: np.ndarray | tf.Tensor | None = None,
head_mask: np.ndarray | tf.Tensor | None = None,
inputs_embeds: np.ndarray | tf.Tensor | None = None,
start_positions: np.ndarray | tf.Tensor | None = None,
end_positions: np.ndarray | tf.Tensor | None = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
start_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (`np.ndarray` or `tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
transformer_outputs = self.transformer(
input_ids=input_ids,
past_key_values=past_key_values,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
)
sequence_output = transformer_outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if start_positions is not None and end_positions is not None:
labels = {"start_position": start_positions}
labels["end_position"] = end_positions
loss = self.hf_compute_loss(labels, (start_logits, end_logits))
if not return_dict:
output = (start_logits, end_logits) + transformer_outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=transformer_outputs.hidden_states,
attentions=transformer_outputs.attentions,
)
| [
"[email protected]"
] | |
5afadcff75d577496520b4eb19d8797e2579c837 | f68e0b205bd3eb036905c60bd03a8d9c7f3b1d88 | /machine_learning/3.3.logistic-regression.py | 1d88351ce1bc85e63bb039ea2ead4f43f3e9061a | [] | no_license | SleepyBag/TrivialPractice | c31458d0c28afba158cb4090cb7013267ff54bb2 | 8e006fbe1425f62b52b2a5fe5b6404ea1883f3ab | refs/heads/master | 2020-03-22T00:34:37.415074 | 2018-06-30T14:02:04 | 2018-06-30T14:02:04 | 139,253,389 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,386 | py | import numpy as np
from math import log
from math import exp
input_dim = 2
output_dim = 1
beta = np.random.normal(size=(input_dim + 1, output_dim))
def p1(xhat, beta):
    # Posterior probability p(y=1 | x): the logistic sigmoid of beta^T xhat.
    e = np.exp(np.dot(beta.T, xhat))[0][0]
    return e / (1 + e)
def iterate(X, Y, beta):
    # One Newton-Raphson step: accumulate the gradient and Hessian of the
    # negative log-likelihood over the data, then update beta.
grad = np.zeros(shape=beta.shape)
grad2 = 0
loss = 0
for x, y in zip(X, Y):
xhat = np.concatenate((np.array([x]).T, np.array([[1]])))
grad += - xhat * (y - p1(xhat, beta))
grad2 += np.dot(xhat, xhat.T) * p1(xhat, beta) * (1 - p1(xhat, beta))
loss += log(1 + exp(np.dot(beta.T, xhat))) - y * np.dot(beta.T, xhat)
print(log(1 + exp(np.dot(beta.T, xhat))) - y * np.dot(beta.T, xhat))
    # Newton update: beta <- beta - H^{-1} grad
    beta = beta - np.dot(np.linalg.inv(grad2), grad)
return grad, grad2, beta, loss
X = np.array([[.697, .460], [.774, .376],
[.634, .264], [.608, .318],
[.556, .215], [.403, .237],
[.481, .149], [.437, .211],
[.666, .091], [.243, .267],
[.245, .057], [.343, .099],
[.639, .161], [.657, .198],
[.360, .370], [.593, .042], [.719, .103]])
Y = np.array([[1]] * 8 + [[0]] * 9)
epoch = 50
for i in range(epoch):
    print('Epoch', i, 'started')
grad, grad2, beta, loss = iterate(X, Y, beta)
    print('loss =', loss)
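# Hedged sketch (added; not in the original script): after training, beta can
# be used for prediction by thresholding the posterior probability at 0.5.
def predict(x, beta):
    xhat = np.concatenate((np.array([x]).T, np.array([[1]])))
    return 1 if p1(xhat, beta) > 0.5 else 0
print('prediction for first sample:', predict(X[0], beta))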
| [
"[email protected]"
] | |
c1c45cd2c22039c954ba3d32df4cdc8fca29ead1 | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano3083.py | 5e9f2bdd220d538e97177f62beff4913cf0d5d34 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
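# Note (added comment): input = -1 below tells cmsRun to process all events
# present in the input files rather than stopping after a fixed count.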
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/120000/88C77E97-2037-B64C-9BB6-EA084CD2A6BE.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest3083.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"[email protected]"
] | |
4eebc2b5ea092f4821209c3a0ae4de3b3c0976ec | a72a154e735100827456a22571aa520d1bbdf50e | /nnvm/python/nnvm/frontend/keras.py | d8c98ec66a56e9e249b285e0aade19adc8066b2c | [
"Apache-2.0"
] | permissive | chengshaoyi/tvm | e6a0caf06ca3ea28ce82caef283b2f1e98f88491 | b877687fbeb8a6c12b62aac9869f7e54091395f4 | refs/heads/master | 2020-03-19T15:39:28.603095 | 2018-06-08T17:23:34 | 2018-06-08T17:23:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,431 | py | # pylint: disable=invalid-name, import-self
"""Keras frontend."""
from __future__ import absolute_import as _abs
import sys
import numpy as np
import tvm
from .. import symbol as _sym
from .common import SymbolTable
__all__ = ['from_keras']
def _check_data_format(keras_layer):
if hasattr(keras_layer, ('data_format')):
if keras_layer.data_format != 'channels_last':
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
def _get_pad_pair(input1d, kernel1d, stride1d):
out1d = (input1d + stride1d - 1) // stride1d
pad = np.maximum((out1d - 1) * stride1d + kernel1d - input1d, 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
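# e.g. (added note) input1d=224, kernel1d=3, stride1d=2 gives out1d=112 and a
# total pad of max(111*2 + 3 - 224, 0) = 1, split as (0, 1); this matches
# Keras 'same' padding, which places the extra element after the input.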
def _get_elu(insym, alpha):
    """A helper method for elu: x for x > 0 and alpha * (exp(x) - 1) otherwise,
    rewritten with relu/exp so it lowers to existing NNVM operators.
    """
    return -alpha * _sym.relu(1 - _sym.exp(insym)) + _sym.relu(insym)
def _convert_activation(insym, keras_layer, _):
if isinstance(keras_layer, str):
act_type = keras_layer
else:
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type == 'linear':
if isinstance(keras_layer, str):
return insym
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1
beta = keras_layer.beta if hasattr(keras_layer, "beta") else 0
return _sym.__add_scalar__(_sym.__mul_scalar__(insym, \
scalar=alpha), scalar=beta)
elif act_type == 'softmax':
return _sym.softmax(insym, axis=1)
elif act_type == 'sigmoid':
return _sym.sigmoid(insym)
elif act_type == 'tanh':
return _sym.tanh(insym)
elif act_type == 'relu':
return _sym.relu(insym)
elif act_type == 'softplus':
return _sym.log(_sym.__add_scalar__(_sym.exp(insym), scalar=1))
elif act_type == 'elu':
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1
return _get_elu(insym, alpha)
elif act_type == 'selu':
# Alpha, Gamma values, obtained from https://arxiv.org/abs/1706.02515
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1.6732
gamma = keras_layer.gamma if hasattr(keras_layer, "gamma") else 1.0507
return gamma * _get_elu(insym, alpha)
elif act_type == 'relu6':
return _sym.clip(insym, a_min=0, a_max=6)
elif act_type == 'softsign':
return insym / (1 + (_sym.relu(insym) + _sym.relu(_sym.negative(insym))))
elif act_type == 'hard_sigmoid':
transformX = (0.2 * insym) + 0.5
return _sym.clip(transformX, a_min=0, a_max=1)
else:
raise TypeError("Unsupported activation type : {}".format(act_type))
def _convert_advanced_activation(insym, keras_layer, symtab):
act_type = type(keras_layer).__name__
if act_type == 'LeakyReLU':
return _sym.leaky_relu(insym, alpha=keras_layer.alpha)
elif act_type == 'ELU':
alpha = keras_layer.alpha if hasattr(keras_layer, "alpha") else 1
return _get_elu(insym, alpha)
elif act_type == 'PReLU':
assert hasattr(keras_layer, "alpha"), \
"alpha required for PReLU."
_check_data_format(keras_layer)
size = len(keras_layer.alpha.shape)
return -symtab.new_const(keras_layer.get_weights()[0] \
.transpose(np.roll(range(size), 1))) \
* _sym.relu(-insym) + _sym.relu(insym)
elif act_type == 'ThresholdedReLU':
theta = keras_layer.theta if hasattr(keras_layer, "theta") else 1.0
theta_tensor = _sym.full_like(insym[0], fill_value=float(theta))
return _sym.elemwise_mul(insym[0], _sym.greater(insym[0], theta_tensor, out_type="float32"))
else:
raise TypeError("Unsupported advanced activation type : {}".format(act_type))
def _convert_merge(insym, keras_layer, _):
merge_type = type(keras_layer).__name__
ret = insym[0]
for i in range(1, len(insym)):
if merge_type == 'Add':
ret = _sym.elemwise_add(ret, insym[i])
elif merge_type == 'Subtract':
ret = _sym.elemwise_sub(ret, insym[i])
elif merge_type == 'Multiply':
ret = _sym.elemwise_mul(ret, insym[i])
elif merge_type == 'Average':
raise NotImplementedError('Average merge not implemented')
elif merge_type == 'Maximum':
raise NotImplementedError('Maximum merge not implemented')
else:
raise TypeError("Unsupported merge type : {}".format(merge_type))
return ret
def _convert_dense(insym, keras_layer, symtab):
weightList = keras_layer.get_weights()
weight = symtab.new_const(weightList[0].transpose([1, 0]))
params = {'weight':weight, 'use_bias':False, 'units':weightList[0].shape[1]}
if keras_layer.use_bias:
params['use_bias'] = True
params['bias'] = symtab.new_const(weightList[1])
out = _sym.dense(data=insym, **params)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, symtab)
return out
def _convert_convolution(insym, keras_layer, symtab):
_check_data_format(keras_layer)
is_deconv = type(keras_layer).__name__ == 'Conv2DTranspose'
is_depthconv = type(keras_layer).__name__ == 'DepthwiseConv2D'
weightList = keras_layer.get_weights()
if is_deconv:
kernel_h, kernel_w, n_filters, in_channels = weightList[0].shape
weight = weightList[0].transpose([3, 2, 0, 1])
elif is_depthconv:
kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
weight = weightList[0].transpose([2, 3, 0, 1])
else:
kernel_h, kernel_w, in_channels, n_filters = weightList[0].shape
weight = weightList[0].transpose([3, 2, 0, 1])
dilation = [1, 1]
if isinstance(keras_layer.dilation_rate, (list, tuple)):
dilation = [keras_layer.dilation_rate[0], keras_layer.dilation_rate[1]]
else:
dilation = [keras_layer.dilation_rate, keras_layer.dilation_rate]
kernel_h = (kernel_h - 1) * dilation[0] + 1
kernel_w = (kernel_w - 1) * dilation[1] + 1
stride_h, stride_w = keras_layer.strides
params = {'weight': symtab.new_const(weight),
'kernel_size': [kernel_h, kernel_w],
'strides': [stride_h, stride_w],
'dilation': dilation,
'padding': [0, 0],
'use_bias': False}
if is_depthconv:
params['channels'] = in_channels * depth_mult
params['groups'] = in_channels
else:
params['channels'] = n_filters
if keras_layer.use_bias:
params['use_bias'] = True
params['bias'] = symtab.new_const(weightList[1])
if keras_layer.padding == 'valid':
pass
# we insert a separate pad operator
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
insym = _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
else:
raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
if is_deconv:
out = _sym.conv2d_transpose(data=insym, **params)
else:
out = _sym.conv2d(data=insym, **params)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, symtab)
return out
def _convert_separable_convolution(insym, keras_layer, symtab):
_check_data_format(keras_layer)
weightList = keras_layer.get_weights()
# depthwise conv
kernel_h, kernel_w, in_channels, depth_mult = weightList[0].shape
stride_h, stride_w = keras_layer.strides
weight0 = weightList[0].transpose([2, 3, 0, 1])
params0 = {'weight': symtab.new_const(weight0),
'channels': in_channels * depth_mult,
'groups': in_channels,
'kernel_size': [kernel_h, kernel_w],
'strides': [stride_h, stride_w],
'dilation': [1, 1],
'padding': [0, 0],
'use_bias': False}
if keras_layer.padding == 'valid':
pass
# we insert a separate pad operator
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, kernel_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, kernel_w, stride_w)
insym = _sym.pad(data=insym, pad_width=(
(0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
else:
raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
depthconv = _sym.conv2d(data=insym, **params0)
# pointwise conv
weight1 = weightList[1].transpose([3, 2, 0, 1])
params1 = {'weight': symtab.new_const(weight1),
'channels': weight1.shape[0],
'groups': 1,
'kernel_size': [1, 1],
'strides': [1, 1],
'dilation': [1, 1],
'use_bias': False}
if keras_layer.use_bias:
params1['use_bias'] = True
params1['bias'] = symtab.new_const(weightList[2])
out = _sym.conv2d(data=depthconv, **params1)
# defuse activation
if sys.version_info.major < 3:
act_type = keras_layer.activation.func_name
else:
act_type = keras_layer.activation.__name__
if act_type != 'linear':
out = _convert_activation(out, act_type, symtab)
return out
def _convert_flatten(insym, keras_layer, _):
_check_data_format(keras_layer)
# NCHW -> NHWC so that dense can be correctly converted
insym = _sym.transpose(insym, axes=[0, 2, 3, 1])
return _sym.flatten(insym)
def _convert_pooling(insym, keras_layer, symtab):
_check_data_format(keras_layer)
pool_type = type(keras_layer).__name__
# global pool in keras = global pool + flatten in nnvm
if pool_type == 'GlobalMaxPooling2D':
return _convert_flatten(_sym.global_max_pool2d(insym), keras_layer, symtab)
elif pool_type == 'GlobalAveragePooling2D':
return _convert_flatten(_sym.global_avg_pool2d(insym), keras_layer, symtab)
else:
pool_h, pool_w = keras_layer.pool_size
stride_h, stride_w = keras_layer.strides
params = {'pool_size': [pool_h, pool_w],
'strides': [stride_h, stride_w],
'padding': [0, 0]}
if keras_layer.padding == 'valid':
pass
# we insert a separate pad operator
elif keras_layer.padding == 'same':
in_h = keras_layer.input_shape[1]
in_w = keras_layer.input_shape[2]
pad_t, pad_b = _get_pad_pair(in_h, pool_h, stride_h)
pad_l, pad_r = _get_pad_pair(in_w, pool_w, stride_w)
insym = _sym.pad(data=insym, pad_width=(
(0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)))
else:
raise TypeError("Unsupported padding type : {}".format(keras_layer.padding))
if pool_type == 'MaxPooling2D':
return _sym.max_pool2d(insym, **params)
elif pool_type == 'AveragePooling2D':
# TODO: in keras, padded zeros are not calculated
return _sym.avg_pool2d(insym, **params)
else:
raise TypeError("Unsupported pooling type : {}".format(keras_layer))
def _convert_upsample(insym, keras_layer, _):
_check_data_format(keras_layer)
upsample_type = type(keras_layer).__name__
if upsample_type == "UpSampling1D":
h = keras_layer.size
params = {'scale': h}
elif upsample_type == "UpSampling2D":
h, w = keras_layer.size
if h != w:
raise TypeError("Unsupported upsampling type with different axes size : {}"
.format(keras_layer.size))
params = {'scale': h}
elif upsample_type == "UpSampling3D":
h, w, d = keras_layer.size
if h != w or w != d:
raise TypeError("Unsupported upsampling type with different axes size : {}"
.format(keras_layer.size))
params = {'scale': h}
else:
raise TypeError("Unsupported upsampling type : {}".format(upsample_type))
return _sym.upsampling(insym, **params)
def _convert_batchnorm(insym, keras_layer, symtab):
params = {'scale': False,
'center': False,
'epsilon': keras_layer.epsilon}
idx = 0
if keras_layer.scale:
params['scale'] = True
gamma = keras_layer.get_weights()[idx]
params['gamma'] = symtab.new_const(gamma)
idx += 1
if keras_layer.center:
params['center'] = True
beta = keras_layer.get_weights()[idx]
params['beta'] = symtab.new_const(beta)
idx += 1
moving_mean = keras_layer.get_weights()[idx]
moving_var = keras_layer.get_weights()[idx + 1]
params['moving_mean'] = symtab.new_const(moving_mean)
params['moving_var'] = symtab.new_const(moving_var)
return _sym.batch_norm(data=insym, **params)
def _convert_padding(insym, keras_layer, _):
_check_data_format(keras_layer)
padding_type = type(keras_layer).__name__
padding = keras_layer.padding
top = left = bottom = right = 0
if padding_type == 'ZeroPadding2D':
if isinstance(padding, int):
top = left = bottom = right = padding
elif isinstance(padding, tuple):
if isinstance(padding[0], int):
top, left = padding
bottom, right = padding
elif isinstance(padding[0], tuple):
top, bottom = padding[0]
left, right = padding[1]
else:
raise ValueError("Unrecognized padding option: {}".format(str(padding)))
else:
raise ValueError("Unrecognized padding option: {}".format(str(padding)))
elif padding_type == 'ZeroPadding1D':
raise NotImplementedError("ZeroPadding1D not implemented")
else:
raise ValueError("Unrecognized padding type: {}".format(padding_type))
return _sym.pad(data=insym, pad_width=((0, 0), (0, 0), (top, bottom), (left, right)))
def _convert_concat(insym, keras_layer, _):
_check_data_format(keras_layer)
if not isinstance(insym, list):
insym = [insym]
return _sym.concatenate(*insym, axis=1)
def _convert_reshape(insym, keras_layer, _):
_check_data_format(keras_layer)
ch = keras_layer.input_shape[-1]
assert ch == keras_layer.target_shape[-1], \
"Only supports last dimension in target shape being equal to " \
"the channel number of input tensor."
shape = (-1, ch) + keras_layer.target_shape[:-1]
return _sym.reshape(insym, shape=shape)
def _default_skip(insym, keras_layer, _): # pylint: disable=unused-argument
"""Layers that can be skipped because they are train time only."""
return insym
_convert_map = {
'Dense' : _convert_dense,
'Activation' : _convert_activation,
'LeakyReLU' : _convert_advanced_activation,
'PReLU' : _convert_advanced_activation,
'ELU' : _convert_advanced_activation,
'ThresholdedReLU' : _convert_advanced_activation,
'AveragePooling2D' : _convert_pooling,
'MaxPooling2D' : _convert_pooling,
'GlobalAveragePooling2D' : _convert_pooling,
'GlobalMaxPooling2D' : _convert_pooling,
'Conv2D' : _convert_convolution,
'Conv2DTranspose' : _convert_convolution,
'DepthwiseConv2D' : _convert_convolution,
'SeparableConv2D' : _convert_separable_convolution,
'Flatten' : _convert_flatten,
'Reshape' : _convert_reshape,
'Concatenate' : _convert_concat,
'BatchNormalization' : _convert_batchnorm,
'Add' : _convert_merge,
'Subtract' : _convert_merge,
'Multiply' : _convert_merge,
'ZeroPadding2D' : _convert_padding,
'UpSampling2D' : _convert_upsample,
# 'ZeroPadding1D' : _convert_padding,
# 'AveragePooling1D' : _convert_pooling,
# 'MaxPooling1D' : _convert_pooling,
# 'GlobalAveragePooling1D' : _convert_pooling,
# 'GlobalMaxPooling1D' : _convert_pooling,
# 'Cropping1D' : _convert_cropping,
# 'Cropping2D' : _convert_cropping,
# 'UpSampling1D' : _convert_upsample,
# 'UpSampling3D' : _convert_upsample,
# 'Conv1D' : _convert_convolution1d,
# 'GRU' : _convert_gru,
# 'LSTM' : _convert_lstm,
# 'SimpleRNN' : _convert_simple_rnn,
# 'Bidirectional' : _convert_bidirectional,
# 'TimeDistributed' : _default_skip,
# 'Average' : _convert_merge,
# 'Maximum' : _convert_merge,
# 'Dot' : _convert_merge,
# 'Permute' : _convert_permute,
# 'Embedding' : _convert_embedding,
# 'RepeatVector' : _convert_repeat_vector,
'InputLayer' : _default_skip,
'Dropout' : _default_skip,
'SpatialDropout2D' : _default_skip,
'SpatialDropout1D' : _default_skip,
}
def _check_unsupported_layers(model):
for layer in model.layers:
if type(layer).__name__ not in _convert_map:
raise ValueError("Keras layer {} not supported.".format(type(layer).__name__))
def keras_op_to_nnvm(insym, keras_layer, outname, symtab):
"""Convert keras layer to nnvm symbol, and update symtab.
Parameters
----------
insym : nnvm.symbol.Symbol or a list of it
The input nnvm symbol(s)
keras_layer : keras.layers
The keras layer to be converted
outname : str
Name of the output nnvm symbol
symtab : nnvm.frontend.common.SymbolTable
The global symbol table to be updated
"""
if type(keras_layer).__name__ not in _convert_map:
raise NotImplementedError("{} is not supported".format((type(keras_layer).__name__)))
ret = _convert_map[type(keras_layer).__name__](insym, keras_layer, symtab)
symtab.set_var(outname, ret)
def from_keras(model):
"""Convert keras model to NNVM format.
Parameters
----------
model : keras.engine.training.Model
The keras model to be converted
Returns
-------
sym : nnvm.Symbol
Compatible nnvm symbol
params : dict of str to tvm.NDArray
The parameter dict to be used by nnvm
"""
try:
import keras
except ImportError:
raise ImportError('Keras must be installed')
assert isinstance(model, keras.engine.training.Model)
if keras.backend.image_data_format() != 'channels_last':
raise ValueError("Keras frontend currently supports data_format = channels_last only.")
_check_unsupported_layers(model)
symtab = SymbolTable()
for keras_layer in model.layers:
if isinstance(keras_layer, keras.engine.topology.InputLayer):
symtab.get_var(keras_layer.name, must_contain=False)
else:
inbound_nodes = keras_layer.inbound_nodes if hasattr(keras_layer, 'inbound_nodes') \
else keras_layer._inbound_nodes if hasattr(keras_layer, '_inbound_nodes') \
else None
if inbound_nodes is None:
raise TypeError("Unknown layer type or unsupported Keras version : {}"
.format(keras_layer))
for my_idx, node in enumerate(inbound_nodes):
insym = []
# Since Keras allows creating multiple layers from the same name instance,
# we append node index to the symbol name to make it unique.
# The one exception is InputLayer. Changing input variable names after conversion
# would confuse users, so we should keep them as far as possible. Fortunately,
# they are named uniquely to input_1, input_2, input_3 ... by default.
for pred_idx, pred in zip(node.node_indices, node.inbound_layers):
if isinstance(pred, keras.engine.topology.InputLayer):
_sym = symtab.get_var(pred.name, must_contain=True)
else:
_sym = symtab.get_var(pred.name + ':' + str(pred_idx), must_contain=True)
insym.append(_sym)
if len(insym) == 1:
insym = insym[0]
keras_op_to_nnvm(insym, keras_layer, keras_layer.name + ':' + str(my_idx), symtab)
outsym = symtab.get_var(model.output_layers[0].name + ':0')
tvmparams = {k:tvm.nd.array(np.array(v, dtype=np.float32)) for k, v in symtab.params.items()}
return outsym, tvmparams
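# Hedged usage sketch (added; assumes a compiled Keras model `model` is in
# scope and nnvm is importable):
#
#   sym, params = from_keras(model)
#   # `sym` and `params` can then be passed on to nnvm.compiler.build(...)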
| [
"[email protected]"
] | |
555e47b52b537e75c5f7db4a5e347387352054ae | 2de2437bbf480f6518554bcb204106dd37262023 | /office365/sharepoint/portal/SPSiteCreationResponse.py | 571a183cf080a80520369bacb01a8d04eb63bccb | [
"MIT"
] | permissive | stardust85/Office365-REST-Python-Client | 386e5bba16cdee1472b7e23d405a4bf9b6f5e73a | cd369c607c7d137a000734e9c5e8f03ae3e3c603 | refs/heads/master | 2022-09-29T19:44:02.166438 | 2020-06-03T23:12:40 | 2020-06-03T23:12:40 | 269,356,313 | 0 | 0 | MIT | 2020-06-04T12:41:03 | 2020-06-04T12:41:02 | null | UTF-8 | Python | false | false | 285 | py | from office365.runtime.client_value_object import ClientValueObject
class SPSiteCreationResponse(ClientValueObject):
def __init__(self):
super(SPSiteCreationResponse, self).__init__()
self.SiteId = None
self.SiteStatus = None
self.SiteUrl = None
| [
"[email protected]"
] | |
a4663248aee0f453eeadac2ea056632f3e0246f5 | 2840fe577ab00f93b752c78d36077bab7e68dbf7 | /pp_validate.py | 6490f4bdc328efd4f5d24ace83ad28a2998e538b | [
"LicenseRef-scancode-warranty-disclaimer",
"LicenseRef-scancode-mit-taylor-variant"
] | permissive | jmsteitz/pipresents-gapless | 41ab4d3375a374e009adf5944535ca7c79afe0e6 | ad2252444c01617294545c62e5ddbb612384dc0e | refs/heads/master | 2020-05-25T20:12:09.112735 | 2017-03-16T14:09:10 | 2017-03-16T14:09:10 | 84,963,208 | 0 | 0 | null | 2017-03-14T15:10:15 | 2017-03-14T15:10:15 | null | UTF-8 | Python | false | false | 46,540 | py | import os
import json
import ConfigParser
from Tkinter import Toplevel, Scrollbar,Text
from Tkinter import VERTICAL,RIGHT,LEFT,BOTH,Y,NORMAL,END,DISABLED
"""
1/12/2016 - warn if foreign files in profile rather than abort
"""
class Validator(object):
def validate_profile(self, root, pp_dir, pp_home, pp_profile,editor_issue,display):
# USES
# self.current_showlist
# CREATES
# v_media_lists - file names of all medialists in the profile
# v_shows
# v_track_labels - list of track labels in current medialist.
# v_show_labels - list of show labels in the showlist
# v_medialist_refs - list of references to medialist files in the showlist
# open results display
self.result=ResultWindow(root,"Validate "+pp_profile,display)
self.result.display('t',"\nVALIDATING PROFILE '"+ pp_profile + "'")
if not os.path.exists(pp_profile+os.sep+"pp_showlist.json"):
self.result.display('f',"pp_showlist.json not in profile")
self.result.display('t', "Validation Aborted")
return False
ifile = open(pp_profile+os.sep+"pp_showlist.json", 'rb')
sdict= json.load(ifile)
ifile.close()
v_shows=sdict['shows']
if 'issue' in sdict:
profile_issue= sdict['issue']
else:
profile_issue="1.0"
if profile_issue != editor_issue:
self.result.display('f',"Profile version "+profile_issue+ " is different to that editor")
self.result.display('t', "Validation Aborted")
return False
# read the gpio config
# gpio_cfg_ok=read_gpio_cfg(pp_dir,pp_home,pp_profile)
# MAKE LIST OF SHOW LABELS
v_show_labels=[]
for show in v_shows:
if show['type'] != 'start': v_show_labels.append(show['show-ref'])
# CHECK ALL MEDIALISTS AND THEIR TRACKS
v_media_lists = []
for medialist_file in os.listdir(pp_profile):
if not medialist_file.endswith(".json") and medialist_file not in ('pp_io_config','readme.txt'):
self.result.display('w',"Non medialist file in profile: "+ medialist_file)
if medialist_file.endswith(".json") and medialist_file not in ('pp_showlist.json','schedule.json'):
self.result.display('t',"\nChecking medialist '"+medialist_file+"'")
v_media_lists.append(medialist_file)
# open a medialist and test its tracks
ifile = open(pp_profile + os.sep + medialist_file, 'rb')
sdict= json.load(ifile)
ifile.close()
tracks = sdict['tracks']
if 'issue' in sdict:
medialist_issue= sdict['issue']
else:
medialist_issue="1.0"
# check issue of medialist
if medialist_issue != editor_issue:
self.result.display('f',"Medialist version "+medialist_issue+ " is different to that editor")
self.result.display('t', "Validation Aborted")
return False
# open a medialist and test its tracks
v_track_labels=[]
anonymous=0
for track in tracks:
self.result.display('t'," Checking track '"+track['title']+"'")
# check track-ref
if track['track-ref'] == '':
anonymous+=1
else:
if track['track-ref'] in v_track_labels:
self.result.display('f',"'duplicate track reference: "+ track['track-ref'])
v_track_labels.append(track['track-ref'])
# warn if media tracks blank where optional
if track['type'] in ('audio','image','web','video'):
if track['location'].strip() == '':
self.result.display('w',"blank location")
# check location of relative media tracks where present
if track['type'] in ('video','audio','image','web'):
track_file=track['location']
if track_file.strip() != '' and track_file[0] == "+":
track_file=pp_home+track_file[1:]
if not os.path.exists(track_file): self.result.display('f',"location "+track['location']+ " Media File not Found")
if track['type'] in ('video','audio','message','image','web','menu'):
# check common fields
self.check_animate('animate-begin',track['animate-begin'])
self.check_animate('animate-end',track['animate-end'])
self.check_plugin(track['plugin'],pp_home)
self.check_show_control(track['show-control-begin'],v_show_labels)
self.check_show_control(track['show-control-end'],v_show_labels)
if track['background-image'] != '':
track_file=track['background-image']
if track_file[0] == "+":
track_file=pp_home+track_file[1:]
if not os.path.exists(track_file): self.result.display('f',"background-image "+track['background-image']+ " background image file not found")
if track['track-text'] != "":
if not track['track-text-x'].isdigit(): self.result.display('f',"'Track Text x position' is not 0 or a positive integer")
if not track['track-text-y'].isdigit(): self.result.display('f',"'Track Text y Position' is not 0 or a positive integer")
if track['track-text-colour']=='': self.result.display('f',"'Track Text Colour' is blank")
if track['track-text-font']=='': self.result.display('f',"'Track Text Font' is blank")
if track['type']=='menu':
self.check_menu(track)
if track['type'] == "image":
if track['duration'] != "" and not track['duration'].isdigit(): self.result.display('f',"'Duration' is not blank, 0 or a positive integer")
if track['image-rotate'] != "" and not track['image-rotate'].isdigit(): self.result.display('f',"'Image Rotation' is not blank, 0 or a positive integer")
self.check_image_window('track','image-window',track['image-window'])
if track['type'] == "video":
self.check_omx_window('track','omx-window',track['omx-window'])
self.check_volume('track','omxplayer-volume',track['omx-volume'])
if track['type'] == "audio":
if track['duration'] != '' and not track['duration'].isdigit(): self.result.display('f',"'Duration' is not 0 or a positive integer")
if track['duration'] == '0' : self.result.display('w',"'Duration' of an audio track is zero")
self.check_volume('track','mplayer-volume',track['mplayer-volume'])
if track['type'] == "message":
if track['duration'] != '' and not track['duration'].isdigit(): self.result.display('f',"'Duration' is not 0 or a positive integer")
if track['text'] != "":
if track['message-x'] != '' and not track['message-x'].isdigit(): self.result.display('f',"'Message x Position' is not blank, 0 or a positive integer")
if track['message-y'] != '' and not track['message-y'].isdigit(): self.result.display('f',"'Message y Position' is not blank, 0 or a positive integer")
if track['message-colour']=='': self.result.display('f',"'Message Text Colour' is blank")
if track['message-font']=='': self.result.display('f',"Message Text Font' is blank")
if track['type'] == 'web':
self.check_browser_commands(track['browser-commands'])
self.check_web_window('track','web-window',track['web-window'])
# CHECK CROSS REF TRACK TO SHOW
if track['type'] == 'show':
if track['sub-show'] == "":
self.result.display('f',"No 'Sub-show to Run'")
else:
if track['sub-show'] not in v_show_labels: self.result.display('f',"Sub-show "+track['sub-show'] + " does not exist")
# if anonymous == 0 :self.result.display('w',"zero anonymous tracks in medialist " + file)
# check for duplicate track-labels
# !!!!!!!!!!!!!!!!!! add check for all labels
# SHOWS
# find start show and test it, test show-refs at the same time
found=0
for show in v_shows:
if show['type'] == 'start':
self.result.display('t',"\nChecking show '"+show['title'] + "' first pass")
found+=1
if show['show-ref'] != 'start': self.result.display('f',"start show has incorrect label")
else:
self.result.display('t',"Checking show '"+show['title'] + "' first pass")
if show['show-ref'] == '': self.result.display('f',"Show Reference is blank")
if ' ' in show['show-ref']: self.result.display('f',"Spaces not allowed in Show Reference: " + show['show-ref'])
if found == 0:self.result.display('f',"There is no start show")
if found > 1:self.result.display('f',"There is more than 1 start show")
# check for duplicate show-labels
for show_label in v_show_labels:
found = 0
for show in v_shows:
if show['show-ref'] == show_label: found+=1
if found > 1: self.result.display('f',show_label + " is defined more than once")
# check other things about all the shows and create a list of medialist file references
v_medialist_refs=[]
for show in v_shows:
if show['type'] == "start":
self.result.display('t',"\nChecking show '"+show['title']+ "' second pass" )
self.check_start_shows(show,v_show_labels)
else:
self.result.display('t',"Checking show '"+show['title']+ "' second pass" )
if show['medialist']=='': self.result.display('f', show['show-ref']+ " show has blank medialist")
if '.json' not in show['medialist']:
self.result.display('f', show['show-ref']+ " show has invalid medialist")
self.result.display('t', "Validation Aborted")
return False
if show['medialist'] not in v_media_lists:
self.result.display('f', "'"+show['medialist']+ "' medialist not found")
self.result.display('t', "Validation Aborted")
return False
if not os.path.exists(pp_profile + os.sep + show['medialist']):
self.result.display('f', "'"+show['medialist']+ "' medialist file does not exist")
self.result.display('t', "Validation Aborted")
return False
v_medialist_refs.append(show['medialist'])
# open medialist and produce a dictionary of its contents for use later
ifile = open(pp_profile + os.sep + show['medialist'], 'rb')
tracks = json.load(ifile)['tracks']
ifile.close()
# make a list of the track labels
v_track_labels=[]
for track in tracks:
if track['track-ref'] !='':
v_track_labels.append(track['track-ref'])
# check common fields in the show
#show
self.check_show_canvas('show','Show Canvas',show['show-canvas'])
#show background and text
if show['show-text'] != "":
if not show['show-text-x'].isdigit(): self.result.display('f',"'Show Text x Position' is not 0 or a positive integer")
if not show['show-text-y'].isdigit(): self.result.display('f',"'Show Text y Position' is not 0 or a positive integer")
if show['show-text-colour']=='': self.result.display('f',"'Show Text Colour' is blank")
if show['show-text-font']=='': self.result.display('f',"'Show Text Font' is blank")
background_image_file=show['background-image']
if background_image_file.strip() != '' and background_image_file[0] == "+":
track_file=pp_home+background_image_file[1:]
if not os.path.exists(track_file): self.result.display('f',"Background Image "+show['background-image']+ " background image file not found")
#track defaults
if not show['duration'].isdigit(): self.result.display('f',"'Duration' is not 0 or a positive integer")
if not show['image-rotate'].isdigit(): self.result.display('f',"'Image Rotation' is not 0 or a positive integer")
self.check_volume('show','Video Player Volume',show['omx-volume'])
self.check_volume('show','Audio Volume',show['mplayer-volume'])
self.check_omx_window('show','Video Window',show['omx-window'])
self.check_image_window('show','Image Window',show['image-window'])
#eggtimer
if show['eggtimer-text'] != "":
if show['eggtimer-colour']=='': self.result.display('f',"'Eggtimer Colour' is blank")
if show['eggtimer-font']=='': self.result.display('f',"'Eggtimer Font' is blank")
if not show['eggtimer-x'].isdigit(): self.result.display('f',"'Eggtimer x Position' is not 0 or a positive integer")
if not show['eggtimer-y'].isdigit(): self.result.display('f',"'Eggtimer y Position' is not 0 or a positive integer")
# Validate simple fields of each show type
if show['type'] in ("mediashow",'liveshow'):
if show['child-track-ref'] != '':
if show['child-track-ref'] not in v_track_labels:
self.result.display('f',"'Child Track ' " + show['child-track-ref'] + ' is not in medialist' )
if not show['hint-y'].isdigit(): self.result.display('f',"'Hint y Position' is not 0 or a positive integer")
if not show['hint-x'].isdigit(): self.result.display('f',"'Hint x Position' is not 0 or a positive integer")
if show['hint-colour']=='': self.result.display('f',"'Hint Colour' is blank")
if show['hint-font']=='': self.result.display('f',"'Hint Font' is blank")
self.check_hh_mm_ss('Show Timeout',show['show-timeout'])
self.check_hh_mm_ss('Repeat Interval',show['interval'])
if not show['track-count-limit'].isdigit(): self.result.display('f',"'Track Count Limit' is not 0 or a positive integer")
if show['trigger-start-type']in('input','input-persist'):
self.check_triggers('Trigger for Start',show['trigger-start-param'])
if show['trigger-next-type'] == 'input':
self.check_triggers('Trigger for Next',show['trigger-next-param'])
if show['trigger-end-type'] == 'input':
self.check_triggers('Trigger for End',show['trigger-end-param'])
self.check_web_window('show','web-window',show['web-window'])
self.check_controls('controls',show['controls'])
#notices
if show['trigger-wait-text'] != "" or show['empty-text'] != "":
if show['admin-colour']=='': self.result.display('f',"' Notice Text Colour' is blank")
if show['admin-font']=='': self.result.display('f',"'Notice Text Font' is blank")
if not show['admin-x'].isdigit(): self.result.display('f',"'Notice Text x Position' is not 0 or a positive integer")
if not show['admin-y'].isdigit(): self.result.display('f',"'Notice Text y Position' is not 0 or a positive integer")
if show['type'] in ("artmediashow",'artliveshow'):
#notices
if show['empty-text'] != "":
if show['admin-colour']=='': self.result.display('f',"' Notice Text Colour' is blank")
if show['admin-font']=='': self.result.display('f',"'Notice Text Font' is blank")
if not show['admin-x'].isdigit(): self.result.display('f',"'Notice Text x Position' is not 0 or a positive integer")
if not show['admin-y'].isdigit(): self.result.display('f',"'Notice Text y Position' is not 0 or a positive integer")
self.check_controls('controls',show['controls'])
if show['type'] == "menu":
self.check_hh_mm_ss('Show Timeout',show['show-timeout'])
self.check_hh_mm_ss('Track Timeout',show['track-timeout'])
if show['menu-track-ref'] not in v_track_labels:
self.result.display('f',"'menu track ' is not in medialist: " + show['menu-track-ref'])
self.check_web_window('show','web-window',show['web-window'])
self.check_controls('controls',show['controls'])
if show['type'] == 'hyperlinkshow':
if show['first-track-ref'] not in v_track_labels:
self.result.display('f',"'first track ' is not in medialist: " + show['first-track-ref'])
if show['home-track-ref'] not in v_track_labels:
self.result.display('f',"'home track ' is not in medialist: " + show['home-track-ref'])
if show['timeout-track-ref'] not in v_track_labels:
self.result.display('f',"'timeout track ' is not in medialist: " + show['timeout-track-ref'])
self.check_hyperlinks('links',show['links'],v_track_labels)
self.check_hh_mm_ss('Show Timeout',show['show-timeout'])
self.check_hh_mm_ss('Track Timeout',show['track-timeout'])
self.check_web_window('show','web-window',show['web-window'])
if show['type'] == 'radiobuttonshow':
if show['first-track-ref'] not in v_track_labels:
self.result.display('f',"'first track ' is not in medialist: " + show['first-track-ref'])
self.check_radiobutton_links('links',show['links'],v_track_labels)
self.check_hh_mm_ss('Show Timeout',show['show-timeout'])
self.check_hh_mm_ss('Track Timeout',show['track-timeout'])
self.check_web_window('show','web-window',show['web-window'])
self.result.display('t', "\nValidation Complete")
self.result.stats()
if self.result.num_errors() == 0:
return True
else:
return False
def check_hh_mm_ss(self,name,item):
fields=item.split(':')
if len(fields) == 0:
return
if len(fields)>3:
self.result.display('f','Too many fields in '+ name + ': ' + item)
return
if len(fields) == 1:
seconds=fields[0]
minutes='0'
hours='0'
if len(fields) == 2:
seconds=fields[1]
minutes=fields[0]
hours='0'
if len(fields) == 3:
seconds=fields[2]
minutes=fields[1]
hours=fields[0]
if not seconds.isdigit() or not minutes.isdigit() or not hours.isdigit():
self.result.display('f','Fields of '+ name + ' are not positive integers: ' + item)
return
if int(minutes)>59 or int(seconds)>59:
if len(fields) != 1:
self.result.display('f','Fields of '+ name + ' are out of range: ' + item)
else:
self.result.display('w','Seconds or Minutes is greater than 59 in '+ name + ': ' + item)
return
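# Examples (sketch): '1:30:00' passes; '1:75:00' fails because minutes and
# seconds must be 0-59 when more than one field is given; a bare '90'
# only produces a warning (single-field values may exceed 59).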
def check_start_shows(self,show,v_show_labels):
text=show['start-show']
show_count=0
fields = text.split()
for field in fields:
show_count+=1
if field not in v_show_labels:
self.result.display('f',"start show has undefined Start Show: "+ field)
if show_count == 0:
self.result.display('w',"start show has zero Start Shows")
# ***********************************
# triggers
# ************************************
def check_triggers(self,field,line):
words=line.split()
if len(words)!=1: self.result.display('f','Wrong number of fields in: ' + field + ", " + line)
# ***********************************
# volume
# ************************************
def check_volume(self,track_type,field,line):
if track_type == 'show' and line.strip() == '':
self.result.display('f','Wrong number of fields: ' + field + ", " + line)
return
if track_type == 'track' and line.strip() == '':
return
if line[0] not in ('0','-'):
self.result.display('f','Invalid value: ' + field + ", " + line)
return
if line[0] == '0':
if not line.isdigit():
self.result.display('f','Invalid value: ' + field + ", " + line)
return
if int(line) != 0:
self.result.display('f','out of range -60 to 0: ' + field + ", " + line)
return
return
elif line[0] == '-':
if not line[1:].isdigit():
self.result.display('f','Invalid value: ' + field + ", " + line)
return
if int(line)<-60 or int(line)>0:
self.result.display('f','out of range -60 to 0: ' + field + ", " + line)
return
return
else:
self.result.display('f','help, do not understand!: ' + field + ", " + line)
return
# ***********************************
# time of day inputs
# ************************************
def check_times(self,text):
lines = text.split("\n")
for line in lines:
self.check_times_line(line)
def check_times_line(self,line):
items = line.split()
if len(items) == 0: self.result.display('w','No time values when using time of day trigger: ')
for item in items:
self.check_times_item(item)
def check_times_item(self,item):
if item[0] == '+':
if not item.lstrip('+').isdigit():
self.result.display('f','Value of relative time is not positive integer: ' + item)
return
else:
# hh:mm;ss
fields=item.split(':')
if len(fields) == 0:
return
if len(fields) == 1:
self.result.display('f','Too few fields in time: ' + item)
return
if len(fields)>3:
self.result.display('f','Too many fields in time: ' + item)
return
if len(fields) != 3:
seconds='0'
else:
seconds=fields[2]
if not fields[0].isdigit() or not fields[1].isdigit() or not seconds.isdigit():
self.result.display('f','Fields of time are not positive integers: ' + item)
return
if int(fields[0])>23 or int(fields[1])>59 or int(seconds)>59:
self.result.display('f','Fields of time are out of range: ' + item)
return
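# Examples (sketch): '+30' (a relative offset, digits only) passes, as do
# '09:15' and '23:59:59'; '24:00' fails because hours must be 0-23.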
def check_duration(self,field,line):
fields=line.split(':')
if len(fields) == 0:
self.result.display('f','End Trigger, ' + field +' Field is empty: ' + line)
return
if len(fields)>3:
self.result.display('f','End Trigger, ' + field + ' More then 3 fields: ' + line)
return
if len(fields) == 1:
secs=fields[0]
minutes='0'
hours='0'
if len(fields) == 2:
secs=fields[1]
minutes=fields[0]
hours='0'
if len(fields) == 3:
secs=fields[2]
minutes=fields[1]
hours=fields[0]
if not hours.isdigit() or not minutes.isdigit() or not secs.isdigit():
self.result.display('f','End Trigger, ' + field + ' Fields are not positive integers: ' + line)
return
if int(hours)>23 or int(minutes)>59 or int(secs)>59:
self.result.display('f','End Trigger, ' + field + ' Fields are out of range: ' + line)
return
# *******************
# Check menu
# ***********************
# window
# consistencty of modes
def check_menu(self,track):
if not track['menu-rows'].isdigit(): self.result.display('f'," Menu Rows is not 0 or a positive integer")
if not track['menu-columns'].isdigit(): self.result.display('f'," Menu Columns is not 0 or a positive integer")
if not track['menu-icon-width'].isdigit(): self.result.display('f'," Icon Width is not 0 or a positive integer")
if not track['menu-icon-height'].isdigit(): self.result.display('f'," Icon Height is not 0 or a positive integer")
if not track['menu-horizontal-padding'].isdigit(): self.result.display('f'," Horizontal Padding is not 0 or a positive integer")
if not track['menu-vertical-padding'].isdigit(): self.result.display('f'," Vertical Padding is not 0 or a positive integer")
if not track['menu-text-width'].isdigit(): self.result.display('f'," Text Width is not 0 or a positive integer")
if not track['menu-text-height'].isdigit(): self.result.display('f'," Text Height is not 0 or a positive integer")
if not track['menu-horizontal-separation'].isdigit(): self.result.display('f'," Horizontal Separation is not 0 or a positive integer")
if not track['menu-vertical-separation'].isdigit(): self.result.display('f'," Vertical Separation is not 0 or a positive integer")
if not track['menu-strip-padding'].isdigit(): self.result.display('f'," Strip Padding is not 0 or a positive integer")
if not track['hint-x'].isdigit(): self.result.display('f',"'Hint x Position' is not 0 or a positive integer")
if not track['hint-y'].isdigit(): self.result.display('f',"'Hint y Position' is not 0 or a positive integer")
if not track['track-text-x'].isdigit(): self.result.display('f'," Menu Text x Position is not 0 or a positive integer")
if not track['track-text-y'].isdigit(): self.result.display('f'," Menu Text y Position is not 0 or a positive integer")
if track['menu-icon-mode'] == 'none' and track['menu-text-mode'] == 'none':
self.result.display('f'," Icon and Text are both None")
if track['menu-icon-mode'] == 'none' and track['menu-text-mode'] == 'overlay':
self.result.display('f'," cannot overlay none icon")
self.check_menu_window(track['menu-window'])
def check_menu_window(self,line):
if line == '':
self.result.display('f'," menu Window: may not be blank")
return
if line != '':
fields = line.split()
if len(fields) not in (1, 2,4):
self.result.display('f'," menu Window: wrong number of fields")
return
if len(fields) == 1:
if fields[0] != 'fullscreen':
self.result.display('f'," menu Window: single argument must be fullscreen")
return
if len(fields) == 2:
if not (fields[0].isdigit() and fields[1].isdigit()):
self.result.display('f'," menu Window: coordinates must be positive integers")
return
if len(fields) == 4:
if not(fields[0].isdigit() and fields[1].isdigit() and fields[2].isdigit() and fields[3].isdigit()):
self.result.display('f'," menu Window: coordinates must be positive integers")
return
# *******************
# Check plugin
# ***********************
def check_plugin(self,plugin_cfg,pp_home):
if plugin_cfg.strip() != '' and plugin_cfg[0] == "+":
plugin_cfg=pp_home+plugin_cfg[1:]
if not os.path.exists(plugin_cfg):
self.result.display('f','plugin configuration file not found: '+ plugin_cfg)
# *******************
# Check browser commands
# ***********************
def check_browser_commands(self,command_text):
lines = command_text.split('\n')
for line in lines:
if line.strip() == "":
continue
self.check_browser_command(line)
def check_browser_command(self,line):
fields = line.split()
if fields[0] == 'uzbl':
return
if len(fields) not in (1,2):
self.result.display('f','incorrect number of fields in browser command: '+ line)
return
command = fields[0]
if command not in ('load','refresh','wait','exit','loop'):
self.result.display('f','unknown command in browser commands: '+ line)
return
if command in ('refresh','exit','loop') and len(fields) != 1:
self.result.display('f','incorrect number of fields for '+ command + 'in: '+ line)
return
if command == 'load':
if len(fields) != 2:
self.result.display('f','incorrect number of fields for '+ command + 'in: '+ line)
return
if command == 'wait':
if len(fields) != 2:
self.result.display('f','incorrect number of fields for '+ command + 'in: '+ line)
return
arg = fields[1]
if not arg.isdigit():
self.result.display('f','Argument for Wait is not 0 or positive number in: '+ line)
return
# *******************
# Check controls
# *******************
def check_controls(self,name,controls_text):
lines = controls_text.split('\n')
for line in lines:
if line.strip() == "":
continue
self.check_control(line)
def check_control(self,line):
fields = line.split()
if len(fields) != 2 :
self.result.display('f',"incorrect number of fields in Control: " + line)
return
operation=fields[1]
if operation in ('up','down','play','stop','exit','pause','no-command','null') or operation[0:6] == 'mplay-' or operation[0:4] == 'omx-' or operation[0:5] == 'uzbl-':
return
else:
self.result.display('f',"unknown Command in Control: " + line)
# *******************
# Check hyperlinkshow links
# ***********************
def check_hyperlinks(self,name,links_text,v_track_labels):
lines = links_text.split('\n')
for line in lines:
if line.strip() == "":
continue
self.check_hyperlink(line,v_track_labels)
def check_hyperlink(self,line,v_track_labels):
fields = line.split()
if len(fields) not in (2,3):
self.result.display('f',"Incorrect number of fields in Control: " + line)
return
symbol=fields[0]
operation=fields[1]
if operation in ('home','null','stop','exit','repeat','pause','no-command') or operation[0:6] == 'mplay-' or operation[0:4] == 'omx-' or operation[0:5] == 'uzbl-':
return
elif operation in ('call','goto','jump'):
if len(fields)!=3:
self.result.display('f','Incorrect number of fields in Control: ' + line)
return
else:
operand=fields[2]
if operand not in v_track_labels:
self.result.display('f',operand + " Command argument is not in medialist: " + line)
return
elif operation == 'return':
if len(fields)==2:
return
else:
operand=fields[2]
if operand.isdigit():
return
else:
if operand not in v_track_labels:
self.result.display('f',operand + " Command argument is not in medialist: " + line)
return
else:
self.result.display('f',"unknown Command in Control: " + line)
# *******************
# Check radiobuttonshow links
# ***********************
def check_radiobutton_links(self,name,links_text,v_track_labels):
lines = links_text.split('\n')
for line in lines:
if line.strip() == "":
continue
self.check_radiobutton_link(line,v_track_labels)
def check_radiobutton_link(self,line,v_track_labels):
fields = line.split()
if len(fields) not in (2,3):
self.result.display('f',"Incorrect number of fields in Control: " + line)
return
symbol=fields[0]
operation=fields[1]
if operation in ('return','stop','exit','pause','no-command') or operation[0:6] == 'mplay-' or operation[0:4] == 'omx-' or operation[0:5] == 'uzbl-':
return
elif operation == 'play':
if len(fields)!=3:
self.result.display('f','Incorrect number of fields in Control: ' + line)
return
else:
operand=fields[2]
if operand not in v_track_labels:
self.result.display('f',operand + " Command argument is not in medialist: " + line)
return
else:
self.result.display('f',"unknown Command in Control: " + line)
# ***********************************
# checking show controls
# ************************************
def check_show_control(self,text,v_show_labels):
lines = text.split("\n")
for line in lines:
self.check_show_control_fields(line,v_show_labels)
def check_show_control_fields(self,line,v_show_labels):
fields = line.split()
if len(fields) == 0:
return
# OSC command
elif len(fields)>0 and fields[0][0] =='/':
return
elif len(fields)==1:
if fields[0] not in ('exitpipresents','shutdownnow'):
self.result.display('f','Show Control - Unknown command in: ' + line)
return
elif len(fields) == 2:
if fields[0] not in ('open','close'):
self.result.display('f','Show Control - Unknown command in: ' + line)
if fields[1] not in v_show_labels:
self.result.display('f',"Show Control - cannot find Show Reference: "+ line)
return
else:
self.result.display('f','Show Control - Incorrect number of fields in: ' + line)
return
# ***********************************
# checking animation
# ************************************
def check_animate_fields(self,field,line):
fields= line.split()
if len(fields) == 0: return
if len(fields)>4: self.result.display('f','Too many fields in: ' + field + ", " + line)
if len(fields)<4:
self.result.display('f','Too few fields in: ' + field + ", " + line)
return
delay_text=fields[0]
if not delay_text.isdigit(): self.result.display('f','Delay is not 0 or a positive integer in:' + field + ", " + line)
name = fields[1]
# name not checked - done at runtime
out_type = fields[2]
if out_type != 'state': self.result.display('f','Unknown type in: ' + field + ", " + line)
to_state_text=fields[3]
if to_state_text not in ('on','off'): self.result.display('f','Unknown parameter in: ' + field + ", " + line)
return
def check_animate(self,field,text):
lines = text.split("\n")
for line in lines:
self.check_animate_fields(field,line)
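# Example animate line (sketch; 'LED1' is a hypothetical output name - the
# name itself is only checked at runtime): '10 LED1 state on' means after a
# delay of 10, set output LED1 to the on state.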
# *************************************
# GPIO CONFIG - NOT USED
# ************************************
def read_gpio_cfg(self,pp_dir,pp_home):
tryfile=pp_home+os.sep+"gpio.cfg"
if os.path.exists(tryfile):
filename=tryfile
else:
self.result.display('t', "gpio.cfg not found in pp_home")
tryfile=pp_dir+os.sep+'pp_resources'+os.sep+"gpio.cfg"
if os.path.exists(tryfile):
filename=tryfile
else:
self.result.display('w', "gpio.cfg not found in pipresents/pp_resources - GPIO checking turned off")
return False
self.config = ConfigParser.ConfigParser()
self.config.read(filename)
return True
def get(self,section,item):
if self.config.has_option(section,item) is False:
return False
else:
return self.config.get(section,item)
# *************************************
# WEB WINDOW
# ************************************
def check_web_window(self,track_type,field,line):
# check for 'warp' alone or 'warp x y width height'
fields = line.split()
if track_type == 'show' and len(fields) == 0:
self.result.display('f','Show must specify Web Window: ' + field + ", " + line)
return
if len(fields) == 0:
return
# deal with warp which has 1 or 5 arguments
if fields[0] != 'warp':
self.result.display('f','Illegal command: ' + field + ", " + line)
return
if len(fields) not in (1,5):
self.result.display('f','Wrong number of fields for warp: ' + field + ", " + line)
return
# deal with window coordinates
if len(fields) == 5:
# window is specified
if not (fields[1].isdigit() and fields[2].isdigit() and fields[3].isdigit() and fields[4].isdigit()):
self.result.display('f','coordinate is not a positive integer ' + field + ", " + line)
return
# *************************************
# SHOW CANVAS
# ************************************
def check_show_canvas(self,track_type,name,line):
fields=line.split()
if len(fields)== 0:
return
if len(fields) !=4:
self.result.display('f','wrong number of fields for ' + name + ", " + line)
return
else:
# show canvas is specified
if not (fields[0].isdigit() and fields[1].isdigit() and fields[2].isdigit() and fields[3].isdigit()):
self.result.display('f','coordinate is not a positive integer ' + name + ", " + line)
return
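# Example (sketch): a Show Canvas of '0 0 800 600' passes; a blank field is
# also accepted (the canvas is then left unspecified).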
# *************************************
# IMAGE WINDOW
# ************************************
def check_image_window(self,track_type,field,line):
fields = line.split()
if track_type == 'show' and len(fields) == 0:
self.result.display('f','Show must specify Image Window: ' + field + ", " + line)
return
if len(fields) == 0:
return
# deal with original which has 0 or 2 arguments
if fields[0] == 'original':
if len(fields) not in (1,3):
self.result.display('f','Wrong number of fields for original: ' + field + ", " + line)
return
# deal with window coordinates
if len(fields) == 3:
# window is specified
if not (fields[1].isdigit() and fields[2].isdigit()):
self.result.display('f','coordinate is not a positive integer ' + field + ", " + line)
return
return
else:
return
# deal with remainder which has 1, 2, 5 or 6 arguments
# check basic syntax
if fields[0] not in ('shrink','fit','warp'):
self.result.display('f','Illegal command: ' + field + ", " + line)
return
if len(fields) not in (1,2,5,6):
self.result.display('f','Wrong number of fields: ' + field + ", " + line)
return
if len(fields) == 6 and fields[5] not in ('NEAREST','BILINEAR','BICUBIC','ANTIALIAS'):
self.result.display('f','Illegal Filter: ' + field + ", " + line)
return
if len(fields) == 2 and fields[1] not in ('NEAREST','BILINEAR','BICUBIC','ANTIALIAS'):
self.result.display('f','Illegal Filter: ' + field + ", " + line)
# deal with window coordinates
if len(fields) in (5,6):
# window is specified
if not (fields[1].isdigit() and fields[2].isdigit() and fields[3].isdigit() and fields[4].isdigit()):
self.result.display('f','coordinate is not a positive integer ' + field + ", " + line)
return
# *************************************
# VIDEO WINDOW
# ************************************
def check_omx_window(self,track_type,field,line):
fields = line.split()
if track_type == 'show' and len(fields) == 0:
self.result.display('f','show must have video window: ' + field + ", " + line)
return
if len(fields) == 0:
return
# deal with original which has exactly 1 field
if fields[0] == 'original':
if len(fields) != 1:
self.result.display('f','Wrong number of fields for original: ' + field + ", " + line)
return
return
# deal with warp which has 1 or 5 arguments
# check basic syntax
if fields[0] != 'warp':
self.result.display('f','Illegal command: ' + field + ", " + line)
return
if len(fields) not in (1,5):
self.result.display('f','Wrong number of fields for warp: ' + field + ", " + line)
return
# deal with window coordinates
if len(fields) == 5:
# window is specified
if not (fields[1].isdigit() and fields[2].isdigit() and fields[3].isdigit() and fields[4].isdigit()):
self.result.display('f','coordinate is not a positive integer ' + field + ", " + line)
return
# *************************************
# RESULT WINDOW CLASS
# ************************************
class ResultWindow(object):
def __init__(self, parent, title,display_it):
self.display_it=display_it
self.errors=0
self.warnings=0
if self.display_it is False: return
top = Toplevel()
top.title(title)
scrollbar = Scrollbar(top, orient=VERTICAL)
self.textb = Text(top,width=80,height=40, wrap='word', font="arial 11",padx=5,yscrollcommand=scrollbar.set)
scrollbar.config(command=self.textb.yview)
scrollbar.pack(side=RIGHT, fill=Y)
self.textb.pack(side=LEFT, fill=BOTH, expand=1)
self.textb.config(state=NORMAL)
self.textb.delete(1.0, END)
self.textb.config(state=DISABLED)
def display(self,priority,text):
if priority == 'f': self.errors+=1
if priority == 'w':self.warnings +=1
if self.display_it is False: return
self.textb.config(state=NORMAL)
if priority == 't':
self.textb.insert(END, text+"\n")
if priority == 'f':
self.textb.insert(END, " ** Error: "+text+"\n\n")
if priority == 'w':
self.textb.insert(END, " ** Warning: "+text+"\n\n")
self.textb.config(state=DISABLED)
def stats(self):
if self.display_it is False: return
self.textb.config(state=NORMAL)
self.textb.insert(END, "\nErrors: "+str(self.errors)+"\nWarnings: "+str(self.warnings)+"\n\n\n")
self.textb.config(state=DISABLED)
def num_errors(self):
return self.errors
| [
"[email protected]"
] | |
add2fdf8fbb97db4726458d6089e1bea384ed165 | 8fc7b22d6ea7444e0b90d5fb8e361ace06b4cb57 | /setup.py | fad40934e3e44c29fddd2fe552a04cdead0b85d7 | [
"Apache-2.0"
] | permissive | rixx/django-hierarkey | 80a9569eca317d997560fc92d3d67e5083ae081e | e61f03bd1a35489905f3b08fdc18755f1ed07973 | refs/heads/master | 2021-06-07T09:47:59.710988 | 2020-07-21T14:57:27 | 2020-07-21T14:57:27 | 195,140,375 | 0 | 0 | Apache-2.0 | 2019-07-03T23:51:33 | 2019-07-03T23:51:32 | null | UTF-8 | Python | false | false | 1,490 | py | from codecs import open
from os import path
from setuptools import find_packages, setup
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
try:
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
except IOError:
long_description = ''
try:
from hierarkey import version
except ImportError:
version = '?'
setup(
name='django-hierarkey',
version=version,
description='Hierarchical key-value store for django',
long_description=long_description,
url='https://github.com/raphaelm/django-hierarkey',
author='Raphael Michel',
author_email='[email protected]',
license='Apache License 2.0',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Other Audience',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Framework :: Django :: 2.0',
'Framework :: Django :: 2.1',
'Framework :: Django :: 3.0',
],
keywords='strings database models keyvalue',
install_requires=[
'python-dateutil'
],
packages=find_packages(exclude=['tests', 'tests.*', 'demoproject', 'demoproject.*']),
include_package_data=True,
)
| [
"[email protected]"
] | |
eb250e5339657728771d905ffbc0be84a8103fcc | 4e353bf7035eec30e5ad861e119b03c5cafc762d | /QtCore/QXmlStreamNamespaceDeclaration.py | 50587d69bdc7d2c462e766f31a2c38b6faa6a6d9 | [] | no_license | daym/PyQt4-Stubs | fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5 | 57d880c0d453641e31e1e846be4087865fe793a9 | refs/heads/master | 2022-02-11T16:47:31.128023 | 2017-10-06T15:32:21 | 2017-10-06T15:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,849 | py | # encoding: utf-8
# module PyQt4.QtCore
# from C:\Python27\lib\site-packages\PyQt4\QtCore.pyd
# by generator 1.145
# no doc
# imports
import sip as __sip
class QXmlStreamNamespaceDeclaration(): # skipped bases: <type 'sip.simplewrapper'>
"""
QXmlStreamNamespaceDeclaration()
QXmlStreamNamespaceDeclaration(QXmlStreamNamespaceDeclaration)
QXmlStreamNamespaceDeclaration(QString, QString)
"""
def namespaceUri(self): # real signature unknown; restored from __doc__
""" QXmlStreamNamespaceDeclaration.namespaceUri() -> QStringRef """
return QStringRef
def prefix(self): # real signature unknown; restored from __doc__
""" QXmlStreamNamespaceDeclaration.prefix() -> QStringRef """
return QStringRef
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
| [
"[email protected]"
] | |
7adaffb1cbe579b1d161a731e9ac13a98af57b08 | 5a27471bc2ae4a815db2c58d047dbbea03cd8f77 | /comparisonFiles/codigos/Simulacion/LV/PDmasIcf_comparacion.py | 674865b1769c477c33b36cfaf1c0ec73d109887f | [] | no_license | ezalorpro/LaboratorioDeControl | 6ef52bb77b6a2283decb8c9fa153d7b43f019609 | ac286214f9a4b32298aa1caec808717f4b2d9a29 | refs/heads/master | 2023-01-20T19:27:56.233542 | 2020-03-15T20:24:10 | 2020-03-15T20:24:10 | 190,772,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,102 | py | import numpy as np
from scipy.interpolate import interp1d
from scipy.signal import correlate
from scipy.stats import energy_distance
from scipy.integrate import cumtrapz
from scipy import io
from matplotlib import pyplot as plt
import pickle
MatFileMATLAB = io.loadmat('comparisonFiles/Data MATLAB/Simulacion/PDcfmasI10', squeeze_me=True)
MatFileSciLab = io.loadmat('comparisonFiles/Data SciLab/Simulacion/PDmasIcf10', squeeze_me=True)
with open('comparisonFiles/Data LVSCCD/Simulacion/Controlador13.pkl', 'rb') as f:
t_lv, yout_lv, yc_lv, set_point, _ = pickle.load(f)
t_lv = np.asarray(t_lv)
yout_lv = np.asarray(yout_lv)
yc_lv = np.asarray(yc_lv)
set_point = np.asarray(set_point)
t_mat = MatFileMATLAB['t']
yout_mat = MatFileMATLAB['yout']
yc_mat = MatFileMATLAB['yc']
t_sci = MatFileSciLab['t']
yout_sci = MatFileSciLab['yout']
yc_sci = MatFileSciLab['yc']
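# Intent of the three branches below (sketch): whichever solver produced the
# densest time vector is clipped to the time span common to all three runs,
# and the other two responses (plus the set point) are linearly interpolated
# onto that common grid so the traces can be compared point by point.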
if len(t_sci) > len(t_mat) and len(t_sci) > len(t_lv):
mask1 = t_sci <= max(t_mat)
mask2 = t_sci[mask1] <= max(t_lv)
t_sci = t_sci[mask1][mask2]
yout_sci = yout_sci[mask1][mask2]
funcion1 = interp1d(t_mat, yout_mat)
yout_mat = funcion1(t_sci)
funcion2 = interp1d(t_lv, yout_lv)
yout_lv = funcion2(t_sci)
funcion3 = interp1d(t_lv, set_point)
set_point = funcion3(t_sci)
t_comun = t_sci
if len(t_lv) > len(t_mat) and len(t_lv) > len(t_sci):
mask1 = t_lv <= max(t_mat)
mask2 = t_lv[mask1] <= max(t_sci)
t_lv = t_lv[mask1][mask2]
yout_lv = yout_lv[mask1][mask2]
set_point = set_point[mask1][mask2]
funcion1 = interp1d(t_mat, yout_mat)
yout_mat = funcion1(t_lv)
funcion2 = interp1d(t_sci, yout_sci)
yout_sci = funcion2(t_lv)
t_comun = t_lv
if len(t_mat) > len(t_sci) and len(t_mat) > len(t_lv):
mask1 = t_mat <= max(t_sci)
mask2 = t_mat[mask1] <= max(t_lv)
t_mat = t_mat[mask1][mask2]
yout_mat = yout_mat[mask1][mask2]
funcion1 = interp1d(t_lv, yout_lv)
yout_lv = funcion1(t_mat)
funcion2 = interp1d(t_sci, yout_sci)
yout_sci = funcion2(t_mat)
funcion3 = interp1d(t_lv, set_point)
set_point = funcion3(t_mat)
t_comun = t_mat
index_m = np.argmax([abs(yout_lv - yout_mat), abs(yout_lv - yout_sci)], axis=1)
index_temp = np.argmax([
abs(yout_lv[index_m[0]] - yout_mat[index_m[0]]),
abs(yout_lv[index_m[1]] - yout_sci[index_m[1]])
])
index_temp2 = np.argmax([
yout_lv[index_m[index_temp]],
yout_mat[index_m[index_temp]],
yout_sci[index_m[index_temp]]
])
index_temp3 = np.argmin([
yout_lv[index_m[index_temp]],
yout_mat[index_m[index_temp]],
yout_sci[index_m[index_temp]]
])
index_max = index_m[index_temp]
index_min = index_m[index_temp]
if index_temp2 == 0:
YMAX = yout_lv
elif index_temp2 == 1:
YMAX = yout_mat
else:
YMAX = yout_sci
if index_temp3 == 0:
YMIN = yout_lv
elif index_temp3 == 1:
YMIN = yout_mat
else:
YMIN = yout_sci
fig, ax = plt.subplots(figsize=(5.1, 4.2))
ax.plot(t_comun, yout_mat, color="#001C7F", label='MATLAB/ode45', linewidth=2)
ax.plot(t_comun, yout_lv, 'r', dashes=[1, 2], label='LV/RK2 sin filtro', linewidth=3)
ax.plot(t_comun, yout_sci, color="#12711C", dashes=[2, 2], label='SciLab/BDF-Newton', linewidth=2)
ax.plot(t_comun, set_point, 'k', linestyle='-.', label='SetPoint', linewidth=2)
ax.set_title('Controlador PD difuso mas integral con setpoint variable', fontsize=11)
ax.legend(loc=8, bbox_to_anchor=(0.37, 0))
ax.grid()
axins = ax.inset_axes([0.42, 0.65, 0.25, 0.25])
axins.plot(t_comun, yout_mat, color="#001C7F", linewidth=2)
axins.plot(t_comun, yout_lv, 'r', dashes=[1, 2], linewidth=3)
axins.plot(t_comun, yout_sci, color="#12711C", dashes=[2, 2], linewidth=2)
axins.plot(t_comun, set_point, 'k', linestyle='-.', linewidth=2)
axins.grid()
axins.set_xlim(t_comun[index_max] - 0.1, t_comun[index_min] + 0.1)
axins.set_ylim(YMIN[index_min] - 1 * abs(YMIN[index_min] - YMAX[index_min]) / 2,
YMAX[index_max] + 1 * abs(YMIN[index_min] - YMAX[index_min]) / 2)
ax.indicate_inset_zoom(axins)
fig.tight_layout()
plt.savefig('comparisonFiles/plots/Simulacion/PDmasIc.pdf')
plt.show() | [
"[email protected]"
] | |
bea4ba346ee7ce82719f9664f3447a91400044e8 | 16f36b0fc607cb9c0d7b4eb7d5123a1b7ed40c62 | /untitled1/.idea/sda.py | aa69dcd7d98aab5175474796216259bf79104703 | [] | no_license | IanChen6/python-learning | 64c5137f536d10ffc10a9664da43ec02722c95de | fea998620ba0a354a741cdbc9d8455bca4080bae | refs/heads/master | 2021-01-23T18:45:18.595877 | 2017-10-31T10:34:51 | 2017-10-31T10:34:51 | 102,805,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | #!/usr/bin/env python3
#_*_ coding:utf-8 _*_
import sys
print(len("中文"))
print(sys.getdefaultencoding())
print(len("中文".encode("utf-8")))
print(sys.getdefaultencoding())
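# Expected output under Python 3 (sketch): 2, utf-8, 6, utf-8 - two code
# points in "中文", six bytes once UTF-8 encoded.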
import scrapy
| [
"[email protected]"
] | |
f4ae8716a1913caf616981c80109ad0bd68f39a5 | e2bf489830e55a57945b8e696f8e2d6acefeb560 | /05-系统编程-2/06-列表传递给线程.py | f6412c260a9060ce455498ed6ed3712e669c1585 | [] | no_license | taizilinger123/pythonjichu | e713de06fb050943a8a1e0256ccba8dea40a411d | 5ee896e92edbac55d02aa63965d896200b8c2623 | refs/heads/master | 2023-04-01T02:00:37.557667 | 2023-03-31T05:08:40 | 2023-03-31T05:08:40 | 148,663,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | from threading import Thread
import time
def work1(nums):
nums.append(44)
print("----in work1---",nums)
def work2(nums):
# sleep for a moment to make sure thread t1 has finished its work
time.sleep(1)
print("----in work2---",nums)
g_nums = [11,22,33]
t1 = Thread(target=work1, args=(g_nums,))
t1.start()
t2 = Thread(target=work2, args=(g_nums,))
t2.start()
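# Expected output (sketch): work1 prints [11, 22, 33, 44] immediately; about
# one second later work2 prints the same list, showing that both threads
# share the single module-level list object.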
| [
"[email protected]"
] | |
4b0f0f8ce51b0e74329b1c5d2ed22111fce36c37 | f09dc121f213f2881df3572288b7ee5b39246d73 | /aliyun-python-sdk-drds/aliyunsdkdrds/request/v20150413/ProductInfoComplementRequest.py | 41256ac6198538d4dcc5655592121677237a8a82 | [
"Apache-2.0"
] | permissive | hetw/aliyun-openapi-python-sdk | 2f31378ad6be0896fb8090423f607e9c7d3ae774 | 7443eacee9fbbaa93c7975c6dbec92d3c364c577 | refs/heads/master | 2023-01-19T22:42:36.214770 | 2020-12-04T10:55:14 | 2020-12-04T10:55:14 | 318,689,093 | 1 | 0 | NOASSERTION | 2020-12-05T03:03:03 | 2020-12-05T03:03:03 | null | UTF-8 | Python | false | false | 1,567 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkdrds.endpoint import endpoint_data
class ProductInfoComplementRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Drds', '2015-04-13', 'ProductInfoComplement','Drds')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_gender(self):
return self.get_query_params().get('gender')
def set_gender(self,gender):
self.add_query_param('gender',gender)
def get_name(self):
return self.get_query_params().get('name')
def set_name(self,name):
self.add_query_param('name',name) | [
"[email protected]"
] | |
1ae2de28aadc8ec72bc4790674f8652982a75968 | 931515a9fdd4404cb548fb6b80c91590f5d5e3c9 | /presalytics/client/presalytics_ooxml_automation/models/theme_background_fills_details.py | 523ff5504f7e35da55d93fbdc0052ca07a5c56ce | [
"MIT"
] | permissive | presalytics/python-client | 2e2fbd617b493ed8be90b844e23b736f294065e3 | 5d80b78562126feeeb49af4738e2c1aed12dce3a | refs/heads/master | 2021-08-18T02:41:06.938468 | 2020-12-07T15:04:18 | 2020-12-07T15:04:18 | 203,414,411 | 4 | 1 | MIT | 2020-03-31T19:27:47 | 2019-08-20T16:31:57 | Python | UTF-8 | Python | false | false | 9,185 | py | # coding: utf-8
"""
OOXML Automation
This API helps users convert Excel and Powerpoint documents into rich, live dashboards and stories. # noqa: E501
The version of the OpenAPI document: 0.1.0-no-tags
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ThemeBackgroundFillsDetails(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'theme_id': 'str',
'theme': 'ThemeThemesDetails',
'intensity_id': 'int',
'fill_map': 'SharedFillMapDetails',
'id': 'str',
'date_created': 'datetime',
'user_created': 'str',
'date_modified': 'datetime',
'user_modified': 'str'
}
attribute_map = {
'theme_id': 'themeId',
'theme': 'theme',
'intensity_id': 'intensityId',
'fill_map': 'fillMap',
'id': 'id',
'date_created': 'dateCreated',
'user_created': 'userCreated',
'date_modified': 'dateModified',
'user_modified': 'userModified'
}
def __init__(self, theme_id=None, theme=None, intensity_id=None, fill_map=None, id=None, date_created=None, user_created=None, date_modified=None, user_modified=None): # noqa: E501
"""ThemeBackgroundFillsDetails - a model defined in OpenAPI""" # noqa: E501
self._theme_id = None
self._theme = None
self._intensity_id = None
self._fill_map = None
self._id = None
self._date_created = None
self._user_created = None
self._date_modified = None
self._user_modified = None
self.discriminator = None
self.theme_id = theme_id
if theme is not None:
self.theme = theme
if intensity_id is not None:
self.intensity_id = intensity_id
if fill_map is not None:
self.fill_map = fill_map
if id is not None:
self.id = id
if date_created is not None:
self.date_created = date_created
if user_created is not None:
self.user_created = user_created
if date_modified is not None:
self.date_modified = date_modified
if user_modified is not None:
self.user_modified = user_modified
@property
def theme_id(self):
"""Gets the theme_id of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The theme_id of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: str
"""
return self._theme_id
@theme_id.setter
def theme_id(self, theme_id):
"""Sets the theme_id of this ThemeBackgroundFillsDetails.
:param theme_id: The theme_id of this ThemeBackgroundFillsDetails. # noqa: E501
:type: str
"""
self._theme_id = theme_id
@property
def theme(self):
"""Gets the theme of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The theme of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: ThemeThemesDetails
"""
return self._theme
@theme.setter
def theme(self, theme):
"""Sets the theme of this ThemeBackgroundFillsDetails.
:param theme: The theme of this ThemeBackgroundFillsDetails. # noqa: E501
:type: ThemeThemesDetails
"""
self._theme = theme
@property
def intensity_id(self):
"""Gets the intensity_id of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The intensity_id of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: int
"""
return self._intensity_id
@intensity_id.setter
def intensity_id(self, intensity_id):
"""Sets the intensity_id of this ThemeBackgroundFillsDetails.
:param intensity_id: The intensity_id of this ThemeBackgroundFillsDetails. # noqa: E501
:type: int
"""
self._intensity_id = intensity_id
@property
def fill_map(self):
"""Gets the fill_map of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The fill_map of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: SharedFillMapDetails
"""
return self._fill_map
@fill_map.setter
def fill_map(self, fill_map):
"""Sets the fill_map of this ThemeBackgroundFillsDetails.
:param fill_map: The fill_map of this ThemeBackgroundFillsDetails. # noqa: E501
:type: SharedFillMapDetails
"""
self._fill_map = fill_map
@property
def id(self):
"""Gets the id of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The id of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ThemeBackgroundFillsDetails.
:param id: The id of this ThemeBackgroundFillsDetails. # noqa: E501
:type: str
"""
self._id = id
@property
def date_created(self):
"""Gets the date_created of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The date_created of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: datetime
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this ThemeBackgroundFillsDetails.
:param date_created: The date_created of this ThemeBackgroundFillsDetails. # noqa: E501
:type: datetime
"""
self._date_created = date_created
@property
def user_created(self):
"""Gets the user_created of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The user_created of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: str
"""
return self._user_created
@user_created.setter
def user_created(self, user_created):
"""Sets the user_created of this ThemeBackgroundFillsDetails.
:param user_created: The user_created of this ThemeBackgroundFillsDetails. # noqa: E501
:type: str
"""
self._user_created = user_created
@property
def date_modified(self):
"""Gets the date_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The date_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: datetime
"""
return self._date_modified
@date_modified.setter
def date_modified(self, date_modified):
"""Sets the date_modified of this ThemeBackgroundFillsDetails.
:param date_modified: The date_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:type: datetime
"""
self._date_modified = date_modified
@property
def user_modified(self):
"""Gets the user_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:return: The user_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:rtype: str
"""
return self._user_modified
@user_modified.setter
def user_modified(self, user_modified):
"""Sets the user_modified of this ThemeBackgroundFillsDetails.
:param user_modified: The user_modified of this ThemeBackgroundFillsDetails. # noqa: E501
:type: str
"""
self._user_modified = user_modified
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ThemeBackgroundFillsDetails):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
6b318fa6df2f38457877794dba277c5ba5cc3a84 | f26af24795d913a4dd17f467052255d256c95032 | /apps/price/models.py | 168110b8818fd660a191212deac9e181f91eaf29 | [] | no_license | minimedj/3dhero.ru | ccbd8d5d37fe149e6194457e66cfc338afe21bd6 | 5790f448fe03eecf79760c2e73154f0831abaf54 | refs/heads/master | 2021-01-22T21:08:00.632873 | 2016-03-28T13:11:26 | 2016-03-28T13:11:26 | 85,397,391 | 1 | 0 | null | 2017-03-18T11:49:44 | 2017-03-18T11:49:44 | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from apps.file.models import File
from google.appengine.ext import ndb
from model import Base
from werkzeug.wrappers import cached_property
class PriceFile(Base):
order_id = ndb.IntegerProperty(
default=0,
verbose_name=u'Порядок сортировки'
)
file = ndb.KeyProperty(File)
@cached_property
def get_file(self):
if self.file:
return self.file.get()
else:
return None
@classmethod
def _pre_delete_hook(cls, key):
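# NDB pre-delete hook: before this PriceFile entity is removed, also
# delete the File entity referenced by its key so no orphan is left behind.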
obj = key.get()
if obj and obj.file:
obj.file.delete() | [
"[email protected]"
] | |
c3dbc32ffea4a9344505e038a381bfe1d443f5d0 | 3b0336b70c400cac212a9877a45393bb143327dc | /pymontecarlo/util/parameter.py | 3d5bc3e4391f5a9678f8089827a5d72f11e7e76b | [] | no_license | silrichter/pymontecarlo | 85c0966d4be776e44a51d0665cbd8f8240872a62 | 77b4b4ede221cea3f3177f9fe84ee89a2a85cb8b | refs/heads/master | 2021-01-17T06:20:16.719448 | 2016-01-22T17:23:28 | 2016-01-22T17:23:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,673 | py | #!/usr/bin/env python
"""
================================================================================
:mod:`option` -- Building block for options
================================================================================
.. module:: option
:synopsis: Building block for options
.. inheritance-diagram:: pymontecarlo.options.option
"""
# Script information for the file.
__author__ = "Philippe T. Pinard"
__email__ = "[email protected]"
__version__ = "0.1"
__copyright__ = "Copyright (c) 2014 Philippe T. Pinard"
__license__ = "GPL v3"
# Standard library modules.
import copy
import operator
from operator import itemgetter, attrgetter
from collections import MutableMapping, MutableSet, MutableSequence
# Third party modules.
import numpy as np
# Local modules.
from pymontecarlo.util.multipleloop import combine
# Globals and constants variables.
class ParameterizedMetaclass(type):
"""
Meta class that automatically registered parameters defined in the class
header.
"""
def __new__(cls, clsname, bases, methods):
parameters = {}
# Parameters from parents
parents = [b for b in bases if isinstance(b, ParameterizedMetaclass)]
for base in parents:
parameters.update(base.__parameters__)
# Attach attribute names to parameters
for key, value in list(methods.items()):
if not isinstance(value, Parameter):
continue
value._new(cls, clsname, bases, methods, key)
parameters[value.name] = value
# Add __parameters__ attribute
methods['__parameters__'] = parameters
return type.__new__(cls, clsname, bases, methods)
class Parameter(object):
def __init__(self, dtype=object, validators=None, fields=None,
required=True, doc=None):
self._dtype = np.dtype(dtype)
if self._dtype.hasobject:
self._dtype = np.dtype(dtype, metadata={'class': dtype})
if validators is None:
validators = []
if not hasattr(validators, '__iter__'):
validators = [validators]
self._validators = validators
if fields is not None and len(fields) == 0:
raise ValueError('At least one field must be specified')
if fields is None:
fields = []
self._fields = fields
self._required = required
self.__doc__ = doc
def __repr__(self):
return '<%s(%s)>' % (self.__class__.__name__, self.name)
def __get__(self, obj, objtype=None, simplify=True):
if obj is None:
return self
if self.name not in obj.__dict__:
raise AttributeError("No value for attribute '%s'" % self.name)
values = obj.__dict__[self.name]
if simplify:
values = self._simplify_values(values)
return values
def __set__(self, obj, values):
if not obj.__dict__.get(self.name, np.array([])).flags.writeable:
raise ValueError("Frozen parameter")
values = self._parse_values(values)
self.validate(self._simplify_values(values))
obj.__dict__[self.name] = values
def _new(self, cls, clsname, bases, methods, name):
self._name = name
def _parse_values(self, values):
# Hack when values are numpy record
if hasattr(values, 'tolist'):
values = values.tolist()
# Generate 2d array
# One value per row, each column corresponds to a field
values = np.array(values, dtype=self._dtype, ndmin=2)
# Check dtype for object
if self._dtype.hasobject:
klass = self._dtype.metadata['class']
for value in values.flat:
if not isinstance(value, klass):
raise ValueError("Wrong type of values: '%s' != '%s'" % \
(value.__class__.__name__, klass.__name__))
# Reshape values to have one value per row
try:
values = values.reshape((-1, len(self._fields) or 1))
        except ValueError:
raise ValueError('Inconsistent number of values. ' + \
'Expected %i number per value' % len(self._fields))
# Create recarray if fields are defined
if self.has_field():
dtype = [(field, values.dtype) for field in self._fields]
values = np.rec.fromarrays(values.transpose(), dtype)
return values
def _simplify_values(self, values):
if values.size == 1:
return next(values.flat)
if not self.has_field():
return values[:, 0]
else:
if len(values) == 1:
return values[0]
else:
return values
def validate(self, values):
if not hasattr(values, '__iter__'):
values = [values]
if self.is_required() and len(values) == 0:
raise ValueError('%s is required and no values are provided' % self.name)
for value in values:
for validator in self._validators:
validator(value)
def freeze(self, obj):
if self.name not in obj.__dict__:
obj.__dict__[self.name] = np.array([])
obj.__dict__[self.name].flags.writeable = False
def has_field(self):
return len(self._fields) > 0
def set_required(self, state):
self._required = state
def is_required(self):
return self._required
@property
def name(self):
return self._name
@property
def dtype(self):
return self._dtype
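
# Illustrative sketch (not part of the original API): how ``Parameter`` and
# ``ParameterizedMetaclass`` combine. Calling the metaclass directly keeps the
# sketch version-agnostic; with the ``class`` statement one would write
# ``class Detector(metaclass=ParameterizedMetaclass)``. All names below are
# hypothetical.
def _example_parameter_usage():
    Detector = ParameterizedMetaclass(
        'Detector', (object,),
        {'channels': Parameter(np.int64, doc="Number of channels")})
    detector = Detector()
    detector.channels = (128, 256)  # several values may be assigned at once
    return detector.channels        # -> array([128, 256])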
class Alias(object):
def __init__(self, alias, doc=None):
"""
Creates an alias of a parameter.
If the value of the alias is modified, the value of the original
parameter will also be modified.
:arg alias: original parameter
:arg doc: documentation
"""
self._alias = alias
self.__doc__ = doc
def __repr__(self):
return '<%s(Alias of %s)>' % (self.__class__.__name__, self._alias.name)
def __get__(self, obj, objtype=None, simplify=True):
if obj is None:
return self
return self._alias.__get__(obj, objtype, simplify)
def __set__(self, obj, value):
self._alias.__set__(obj, value)
def has_field(self):
return self._alias.has_field()
def is_required(self):
return self._alias.is_required()
def freeze(self, obj):
self._alias.freeze(obj)
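
# Sketch of the Alias semantics (hypothetical names): writing through the alias
# writes through to the wrapped parameter, so both attributes stay in sync.
def _example_alias_usage():
    energy = Parameter(np.float64, doc="Beam energy")
    Beam = ParameterizedMetaclass(
        'Beam', (object,),
        {'energy': energy, 'e0': Alias(energy, doc="Alias of energy")})
    beam = Beam()
    beam.e0 = 15e3      # set via the alias...
    return beam.energy  # ...and read back via the original -> 15000.0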
# class FrozenParameter(Parameter):
#
# def __init__(self, klass_or_value, doc=None, args=(), kwargs=None):
# """
# Creates a frozen parameter.
# Either the frozen value of this parameter should be specified, or
# a class which will be instantiated when the parameter is first
# retrieved.
#
# :arg klass_or_value: frozen class or value
# :arg doc: documentation
# :arg args: arguments to be passed to the class during instantiation
# :arg kwargs: keyword-arguments to be passed to the class
# during instantiation
# """
# Parameter.__init__(self, None, doc)
#
# self._value = klass_or_value
# self._klass_args = args
# if kwargs is None: kwargs = {}
# self._klass_kwargs = kwargs
#
# def __get__(self, obj, objtype=None):
# if obj is None:
# return self
#
# if not self.name in obj.__dict__:
# value = self._value
# if inspect.isclass(value):
# value = self._value(*self._klass_args, **self._klass_kwargs)
# self._validate(value)
# obj.__dict__[self.name] = {'value': value, 'frozen': True}
#
# return Parameter.__get__(self, obj, objtype=objtype)
class FactorAlias(Alias):
"""
Multiplies the set value(s) by the specified factor before passing them
to the alias parameter and divides the returned value(s) from the
alias parameter by the specified factor.
"""
def __init__(self, alias, factor):
Alias.__init__(self, alias)
self._factor = factor
def __get__(self, obj, objtype=None, simplify=True):
if obj is None:
return self
values = Alias.__get__(self, obj, objtype, False)
# Hack since record and recarray do not have ufunc
if isinstance(values, np.rec.recarray):
tmpvalues = values.view((self._alias._dtype, len(values.dtype.names))) / self._factor
values = np.rec.fromarrays(tmpvalues.transpose(), values.dtype)
else:
values = values / self._factor
if simplify:
values = self._alias._simplify_values(values)
return values
def __set__(self, obj, values):
values = np.array(values) * self._factor
Alias.__set__(self, obj, values)
#
class AngleParameter(Parameter):
"""
    Automatically defines two parameters to specify an angle value in
    radians or degrees::

        class Object(object, metaclass=ParameterizedMetaclass):
            angle = AngleParameter()

        obj = Object()
        obj.angle_rad = math.pi
        print(obj.angle_deg) # 180.0
"""
def __init__(self, validators=None, fields=None, required=True, doc=None):
Parameter.__init__(self, np.float, validators, fields, required, doc)
def _new(self, cls, clsname, bases, methods, name):
parameter = methods.pop(name)
methods[name + '_rad'] = parameter
methods[name + '_deg'] = FactorAlias(parameter, np.pi / 180.0)
Parameter._new(self, cls, clsname, bases, methods, name + '_rad')
class UnitParameter(Parameter):
"""
    Automatically defines all possible unit prefixes (M, k, d, etc.) for
    a quantity::

        class Object(object, metaclass=ParameterizedMetaclass):
            distance = UnitParameter('m')

        obj = Object()
        obj.distance_cm = 156
        print(obj.distance_m) # 1.56
"""
_PREFIXES = [('y', 1e-24), # yocto
('z', 1e-21), # zepto
('a', 1e-18), # atto
('f', 1e-15), # femto
('p', 1e-12), # pico
('n', 1e-9), # nano
('u', 1e-6), # micro
('m', 1e-3), # mili
('c', 1e-2), # centi
('d', 1e-1), # deci
('k', 1e3), # kilo
('M', 1e6), # mega
('G', 1e9), # giga
('T', 1e12), # tera
('P', 1e15), # peta
('E', 1e18), # exa
('Z', 1e21), # zetta
('Y', 1e24)] # yotta
def __init__(self, unit, validators=None, fields=None, required=True, doc=None):
Parameter.__init__(self, float, validators, fields, required, doc)
self._unit = unit
def _new(self, cls, clsname, bases, methods, name):
parameter = methods.pop(name)
methods[name + '_' + self.unit] = parameter
for prefix, factor in self._PREFIXES:
methods['%s_%s%s' % (name, prefix, self.unit)] = \
FactorAlias(parameter, factor)
Parameter._new(self, cls, clsname, bases, methods, name + "_" + self.unit)
@property
def unit(self):
return self._unit
class TimeParameter(Parameter):
"""
    Automatically defines all possible time units (s, min, hr, etc.) for
    a quantity::

        class Object(object, metaclass=ParameterizedMetaclass):
            duration = TimeParameter()

        obj = Object()
        obj.duration_s = 78
        print(obj.duration_min) # 1.3
"""
_factors = {'year': 31536000.0,
'month': 2628000.0,
'day': 86400.0,
'hr': 3600.0,
'min': 60.0}
def __init__(self, validators=None, fields=None, required=True, doc=None):
Parameter.__init__(self, np.float, validators, fields, required, doc)
def _new(self, cls, clsname, bases, methods, name):
parameter = methods.pop(name)
methods[name + '_s'] = parameter
for unit, factor in self._factors.items():
methods['%s_%s' % (name, unit)] = \
FactorAlias(parameter, factor)
Parameter._new(self, cls, clsname, bases, methods, name + '_s')
def range_validator(low=-np.infty, high=np.infty, inclusive=True):
"""
Validates if a value is between the low and high limits, inclusively.
"""
if inclusive:
op1 = operator.lt
op2 = operator.gt
else:
op1 = operator.le if not np.isinf(low) else operator.lt
op2 = operator.ge if not np.isinf(high) else operator.gt
op1str = '[' if op1.__name__ == 'lt' else ']'
op2str = ']' if op2.__name__ == 'gt' else '['
def validator(value):
if op1(value, low) or op2(value, high):
raise ValueError('Value (%s) must be between %s%s, %s%s' % \
(value, op1str, low, high, op2str))
return validator
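
# Sketch of plugging a validator into a Parameter (hypothetical names): the
# validators are invoked on every assignment, before the values are stored.
def _example_range_validator_usage():
    Stage = ParameterizedMetaclass(
        'Stage', (object,),
        {'tilt_deg': Parameter(np.float64,
                               validators=range_validator(0.0, 90.0))})
    stage = Stage()
    stage.tilt_deg = 45.0       # accepted
    try:
        stage.tilt_deg = 120.0  # rejected: outside [0.0, 90.0]
    except ValueError:
        pass
    return stage.tilt_deg       # still 45.0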
def notempty_validator():
"""
Validates if a value is not empty or not None.
"""
def validator(value):
if value is None or not bool(value):
raise ValueError('Empty value or None')
return validator
def enum_validator(constants):
"""
Validates that the value is within the specified constant values.
:arg constants: constant values
"""
constants = frozenset(constants)
def validator(value):
if value not in constants:
raise ValueError("Incorrect value(s), possible values: " + str(constants))
return validator
class ParameterizedMutableMapping(MutableMapping):
def __init__(self, *parameter_args, **parameter_kwargs):
self._parameter_args = parameter_args
self._parameter_kwargs = parameter_kwargs
self.__parameters__ = {}
def __repr__(self):
valstr = ', '.join(['%s: %s' % item for item in self.items()])
return '<%s(%s)>' % (self.__class__.__name__, valstr)
def __str__(self):
return str(dict(self))
def __len__(self):
return len(self.__parameters__)
def __getitem__(self, key):
if key not in self.__parameters__:
raise KeyError(key)
return self.__parameters__[key].__get__(self)
def __delitem__(self, key):
if key not in self.__parameters__:
raise KeyError(key)
del self.__dict__[key]
del self.__parameters__[key]
def __setitem__(self, key, value):
try:
parameter = self.__parameters__[key]
except KeyError:
parameter = Parameter(*self._parameter_args, **self._parameter_kwargs)
parameter._name = key
self.__parameters__[key] = parameter
parameter.__set__(self, value)
def __iter__(self):
return iter(self.__parameters__)
class ParameterizedMutableSet(MutableSet):
def __init__(self, *parameter_args, **parameter_kwargs):
self._parameter_args = parameter_args
self._parameter_kwargs = parameter_kwargs
self.__parameters__ = {}
def __repr__(self):
valstr = ', '.join(map(str, self))
return '<%s(%s)>' % (self.__class__.__name__, valstr)
def __str__(self):
return str(set(self))
def __len__(self):
return len(self.__parameters__)
def __iter__(self):
for parameter in self.__parameters__.values():
yield parameter.__get__(self)
def __contains__(self, item):
return self._get_key(item) in self.__parameters__
# def __deepcopy__(self, memo):
# Override
# cls = self.__class__
# result = cls.__new__(cls)
# memo[id(self)] = result
# for k, v in self.__dict__.items():
# result.__dict__[k] = copy.deepcopy(v, memo)
#
# # Key must be update with new key from objects
# for key, parameter in result.__parameters__.items():
# values = parameter.__get__(self, simplify=False)
# newkey = self._get_key(values)
#
# del result.__parameters__[key]
# parameter._name = newkey
# result.__parameters__[newkey] = parameter
#
# del result.__dict__[key]
# result.__dict__[newkey] = values
#
# return result
def _get_key(self, item):
return str(hash(item))
def add(self, item):
key = self._get_key(item)
try:
parameter = self.__parameters__[key]
except KeyError:
parameter = Parameter(*self._parameter_args, **self._parameter_kwargs)
parameter._name = key
self.__parameters__[key] = parameter
parameter.__set__(self, item)
    def discard(self, item):
        # MutableSet.discard must not raise if the item is absent;
        # MutableSet.remove adds the KeyError check on top of this method.
        key = self._get_key(item)
        if key not in self.__parameters__:
            return
        del self.__parameters__[key]
        del self.__dict__[key]
def update(self, items):
for item in items:
self.add(item)
class ParameterizedMutableSequence(MutableSequence):
def __init__(self, *parameter_args, **parameter_kwargs):
self._parameter_args = parameter_args
self._parameter_kwargs = parameter_kwargs
self._parameter_keys = []
self.__parameters__ = {}
def __repr__(self):
valstr = ', '.join(map(str, self))
return '<%s(%s)>' % (self.__class__.__name__, valstr)
def __str__(self):
return str(list(self))
def __len__(self):
return len(self.__parameters__)
def __iter__(self):
for key in self._parameter_keys:
yield self.__parameters__[key].__get__(self)
def __contains__(self, item):
return self._get_key(item) in self._parameter_keys
    def __setitem__(self, index, item):
        oldkey = self._parameter_keys[index]
        parameter = self.__parameters__.pop(oldkey)
        self.__dict__.pop(oldkey, None)

        # Rename the parameter *before* storing the new value so the value
        # lands under the new key in ``self.__dict__``.
        newkey = self._get_key(item)
        self._parameter_keys[index] = newkey
        parameter._name = newkey
        self.__parameters__[newkey] = parameter
        parameter.__set__(self, item)
def __getitem__(self, index):
key = self._parameter_keys[index]
return self.__parameters__[key].__get__(self)
    def __delitem__(self, index):
        key = self._parameter_keys.pop(index)
        del self.__parameters__[key]
        self.__dict__.pop(key, None)
def _get_key(self, item):
return str(hash(item))
def insert(self, index, item):
try:
key = self._parameter_keys[index]
except IndexError:
key = self._get_key(item)
parameter = Parameter(*self._parameter_args, **self._parameter_kwargs)
parameter._name = key
self.__parameters__[key] = parameter
self._parameter_keys.insert(index, key)
else:
parameter = self.__parameters__[key]
parameter.__set__(self, item)
def iter_parameters(obj):
"""
Recursively iterates over all parameters defined in the specified object.
The method yields:
* the object contains the parameter
* the name of the parameter
* the parameter object
:arg obj: object containing parameters
"""
for name, parameter in getattr(obj, '__parameters__', {}).items():
try:
subobj = parameter.__get__(obj) # getattr(obj, name, None)
except AttributeError:
subobj = None
if subobj is not None:
yield from iter_parameters(subobj)
yield obj, name, parameter
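
# Sketch of the traversal order (hypothetical names): nested parameterized
# objects are visited depth-first, so the inner parameter is yielded before
# the entry that holds it.
def _example_iter_parameters():
    Inner = ParameterizedMetaclass(
        'Inner', (object,), {'a': Parameter(np.float64)})
    inner = Inner()
    inner.a = 1.0

    Outer = ParameterizedMetaclass(
        'Outer', (object,), {'child': Parameter(object)})
    outer = Outer()
    outer.child = inner

    return [name for _, name, _param in iter_parameters(outer)]  # ['a', 'child']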
def iter_values(obj):
"""
Recursively iterates over all values defined for all parameters in the
specified object.
The method yields:
* the object from which the value belongs
* the name of the parameter with this value
* the value
:arg obj: object containing parameters
"""
for baseobj, name, parameter in iter_parameters(obj):
try:
values = np.array(parameter.__get__(baseobj), ndmin=1)
except AttributeError: # No value
continue
for value in values:
if hasattr(value, '__parameters__'):
continue
yield baseobj, name, value
def iter_getters(obj):
params = ()
return _iter_getters(obj, params)
def _getter(params):
def _inside(obj):
for param in params:
try:
obj = param.__get__(obj)
except AttributeError:
obj = None
return obj
return _inside
def _iter_getters(obj, params):
for name, parameter in obj.__parameters__.items():
newparams = params + (parameter,)
try:
subobj = parameter.__get__(obj)
except AttributeError:
continue
if hasattr(subobj, '__parameters__'):
yield from _iter_getters(subobj, newparams)
else:
name = '.'.join(map(attrgetter('name'), newparams))
yield name, _getter(newparams)
def freeze(obj):
"""
Recursively freezes all parameters in the specified object.
:arg obj: object containing parameters
"""
for baseobj, _, parameter in iter_parameters(obj):
parameter.freeze(baseobj)
class Expander(object):
"""
Expands an parameterized object based on all possible combinations of
parameter/values.
"""
def expand(self, obj):
"""
Returns a list of the specified object where only one value is defined
for each parameter.
The function computes all possible combinations of parameter/values.
:arg obj: object containing parameters
"""
obj = copy.deepcopy(obj)
parameter_values, parameter_obj_ids = \
self._create_parameter_values_dict(obj)
if not parameter_values:
return [obj]
combinations, parameter_objs, parameters = \
self._create_combinations(parameter_values, parameter_obj_ids)
objs = self._create_objects(obj, combinations, parameter_objs, parameters)
return objs
def is_expandable(self, obj):
parameter_values, _ = self._create_parameter_values_dict(obj)
return bool(parameter_values)
def _create_parameter_values_dict(self, obj):
parameter_values = {}
parameter_obj_ids = {}
for parameter_obj, _name, parameter in iter_parameters(obj):
try:
values = parameter.__get__(parameter_obj, simplify=False)
except AttributeError: # No value
continue
if values.size < 2:
continue
parameter_obj_id = id(parameter_obj) # Use id in case baseobj is not hashable
parameter_values[(parameter_obj_id, parameter)] = values.tolist()
parameter_obj_ids[parameter_obj_id] = parameter_obj
return parameter_values, parameter_obj_ids
def _create_combinations(self, parameter_values, parameter_obj_ids):
combinations, names, _varied = combine(parameter_values)
parameter_objs = list(map(parameter_obj_ids.get, map(itemgetter(0), names)))
parameters = list(map(itemgetter(1), names))
return combinations, parameter_objs, parameters
def _create_objects(self, baseobj, combinations, parameter_objs, parameters):
objs = []
for combination in combinations:
for parameter_obj, parameter, value in zip(parameter_objs, parameters, combination):
parameter.__set__(parameter_obj, value)
objs.append(copy.deepcopy(baseobj))
return objs
_root_expander = Expander()
expand = _root_expander.expand
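
# End-to-end sketch of ``expand`` (hypothetical names): two values for one
# parameter and three for another expand into 2 x 3 = 6 copies of the object,
# each carrying exactly one value per parameter.
def _example_expand():
    Sim = ParameterizedMetaclass(
        'Sim', (object,),
        {'energy_eV': Parameter(np.float64),
         'tilt_deg': Parameter(np.float64)})
    sim = Sim()
    sim.energy_eV = (5e3, 10e3)
    sim.tilt_deg = (0.0, 30.0, 60.0)
    return expand(sim)  # list of 6 ``Sim`` objects, one combination each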
| [
"devnull@localhost"
] | devnull@localhost |
529d1708aadd414f217458769cc1134d4712d1e0 | 67e317d203ba478f0dda6d9014b1daa03acee080 | /nidm/workflows/ProcessExecution.py | 6be6844f43ea864b6b151d88ff96358edf493717 | [
"Apache-2.0"
] | permissive | tvanerp/PyNIDM | ec074dee9550dee91b21339c78105e8bf661cb6b | 6a94875969c6bc5247b09d7d2793ed979b18ab3f | refs/heads/master | 2020-07-25T16:54:03.905301 | 2019-09-13T23:56:18 | 2019-09-13T23:56:18 | 208,361,857 | 0 | 0 | NOASSERTION | 2019-09-13T23:23:06 | 2019-09-13T23:23:05 | null | UTF-8 | Python | false | false | 1,235 | py | import prov.model as pm
from ..core import Constants
from ..experiment.Core import Core
from ..experiment.Core import getUUID
class ProcessExecution(pm.ProvActivity, Core):
"""Class for NIDM-Workflow ProcessExecution Objects.
Default constructor uses empty graph with namespaces added from
NIDM/Scripts/Constants.py. Additional alternate constructors for
user-supplied graphs and default namespaces (i.e. from Constants.py)
and user-supplied graph and namespaces
"""
def __init__(self, parentDoc=None, attributes=None):
"""
        Default constructor, creates document and adds Process activity to graph
with optional attributes
:param parentDoc: optional ProvDocument
:param attributes: optional dictionary of attributes to add
"""
#set graph document
if (parentDoc):
self.graph = parentDoc
else:
self.graph = Constants.p_graph
#execute default parent class constructor
super(ProcessExecution, self).__init__(self.graph,
pm.PROV[getUUID()],
attributes)
self.graph._add_record(self)
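
# A minimal usage sketch (hedged, not part of the NIDM API surface): create a
# ProcessExecution on the default graph from ``Constants``. ``attrs`` below is
# a hypothetical mapping of PROV attributes.
def _example_process_execution():
    attrs = None  # e.g. a dict of PROV qualified names -> values (hypothetical)
    proc = ProcessExecution(parentDoc=None, attributes=attrs)
    return proc.graph  # the activity was registered on Constants.p_graph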
| [
"[email protected]"
] | |
8aa4f99dfc142943b8b42bf343e240017caf68eb | 40c6f8449f25d30b16510d6b6da3893e5eae3641 | /shorts/urls.py | 60cdd103cf7055a38d253710c377d68d0a5a68c5 | [] | no_license | fergalmoran/shortio | b2188df44ebf08455ffd150fb6234dbff582f3c8 | 575dfd8438b37f383e1fc865baf5b7ad65e788ee | refs/heads/master | 2020-03-29T13:03:02.682420 | 2014-04-17T22:10:41 | 2014-04-17T22:10:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from django.conf.urls import patterns, url, include
from shorts import views
from .api import UserList, UserDetail
from .api import UrlList, UrlDetail, UserUrlList
user_urls = patterns(
'',
    url(r'^/(?P<username>[0-9a-zA-Z_-]+)/urls$',
UserUrlList.as_view(), name='userurl-list'),
url(r'^/(?P<username>[0-9a-zA-Z_-]+)$',
UserDetail.as_view(), name='user-detail'),
url(r'^$', UserList.as_view(), name='user-list')
)
urls_urls = patterns(
'',
url(r'^/(?P<pk>\d+)$', UrlDetail.as_view(), name='urls-detail'),
url(r'^$', UrlList.as_view(), name='urls-list')
)
urlpatterns = patterns(
'',
url(r'^users', include(user_urls)),
url(r'^urls', include(urls_urls)),
url(r'^$', views.index, name='index'),
url(r'^create', views.create, name='create'),
url(r'^(?P<url_id>\d+)/$', views.detail, name='detail')
)
| [
"[email protected]"
] | |
628022e1b0203108c42330f824295c40095a5238 | 0b312224bd5a9e6b1dd92b78ccf58049b5d69b1b | /compounds/migrations/0022_auto_20180724_2343.py | f8ceb737b47e0233ffe92b7a56a38ba85a895549 | [] | no_license | paulosjd/frags | e573cc9bc373a7e0847985478b5bf0bfca9b7153 | 4af65c7415dbbfa0a92f308bf93d5734c3583c5e | refs/heads/master | 2020-03-17T00:58:15.530581 | 2018-12-12T23:48:15 | 2018-12-12T23:48:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 763 | py | # Generated by Django 2.0.4 on 2018-07-24 21:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('compounds', '0021_userbioactive'),
]
operations = [
migrations.AddField(
model_name='compoundsource',
name='user_bioactive',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.UserBioactive'),
),
migrations.AlterField(
model_name='compoundsource',
name='user_odorant',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='compounds.UserOdorant'),
),
]
| [
"[email protected]"
] | |
3cf3ebd056bcb46c29d75f30833eea9c8d1dddc6 | 33110fa5ad8c47e31401769086a985eea1a991c7 | /mmsegmentation/tests/test_data/test_dataset.py | 3ebd20e28ed6168d7746eb9e04e12c532d11f73c | [
"Apache-2.0"
] | permissive | UESTC-Liuxin/SKMT | 32bc2781063de1da2a778659e6501762531b15a8 | 377bbe3e5d2777d6c3ccaae7a6c364bd9c85d651 | refs/heads/master | 2023-01-12T19:28:49.340298 | 2020-11-16T03:35:09 | 2020-11-16T03:35:09 | 283,365,017 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,658 | py | import os.path as osp
from unittest.mock import MagicMock, patch
import numpy as np
import pytest
from mmseg.core.evaluation import get_classes, get_palette
from mmseg.datasets import (ADE20KDataset, CityscapesDataset, ConcatDataset,
                            CustomDataset, PascalVOCDataset, RepeatDataset,
                            USDataset, SkmtDataset)
def test_classes():
assert list(CityscapesDataset.CLASSES) == get_classes('cityscapes')
assert list(PascalVOCDataset.CLASSES) == get_classes('voc') == get_classes(
'pascal_voc')
assert list(
ADE20KDataset.CLASSES) == get_classes('ade') == get_classes('ade20k')
with pytest.raises(ValueError):
get_classes('unsupported')
def test_palette():
assert CityscapesDataset.PALETTE == get_palette('cityscapes')
assert PascalVOCDataset.PALETTE == get_palette('voc') == get_palette(
'pascal_voc')
assert ADE20KDataset.PALETTE == get_palette('ade') == get_palette('ade20k')
with pytest.raises(ValueError):
get_palette('unsupported')
@patch('mmseg.datasets.CustomDataset.load_annotations', MagicMock)
@patch('mmseg.datasets.CustomDataset.__getitem__',
MagicMock(side_effect=lambda idx: idx))
def test_dataset_wrapper():
# CustomDataset.load_annotations = MagicMock()
# CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
dataset_a = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_a = 10
dataset_a.img_infos = MagicMock()
dataset_a.img_infos.__len__.return_value = len_a
dataset_b = CustomDataset(img_dir=MagicMock(), pipeline=[])
len_b = 20
dataset_b.img_infos = MagicMock()
dataset_b.img_infos.__len__.return_value = len_b
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert len(repeat_dataset) == 10 * len(dataset_a)
def test_custom_dataset():
# img_norm_cfg = dict(
# mean=[123.675, 116.28, 103.53],
# std=[58.395, 57.12, 57.375],
# to_rgb=True)
# crop_size = (512, 1024)
# train_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(type='LoadAnnotations'),
# dict(type='Resize', img_scale=(128, 256), ratio_range=(0.5, 2.0)),
# dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
# dict(type='RandomFlip', flip_ratio=0.5),
# dict(type='PhotoMetricDistortion'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
# dict(type='DefaultFormatBundle'),
# dict(type='Collect', keys=['img', 'gt_semantic_seg']),
# ]
# test_pipeline = [
# dict(type='LoadImageFromFile'),
# dict(
# type='MultiScaleFlipAug',
# img_scale=(128, 256),
# # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
# flip=False,
# transforms=[
# dict(type='Resize', keep_ratio=True),
# dict(type='RandomFlip'),
# dict(type='Normalize', **img_norm_cfg),
# dict(type='ImageToTensor', keys=['img']),
# dict(type='Collect', keys=['img']),
# ])
# ]
#
# # with img_dir and ann_dir
# train_dataset = CustomDataset(
# train_pipeline,
# data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
# img_dir='imgs/',
# ann_dir='gts/',
# img_suffix='img.jpg',
# seg_map_suffix='gt.png')
# assert len(train_dataset) == 5
#
# # with img_dir, ann_dir, split
# train_dataset = CustomDataset(
# train_pipeline,
# data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
# img_dir='imgs/',
# ann_dir='gts/',
# img_suffix='img.jpg',
# seg_map_suffix='gt.png',
# split='splits/train.txt')
# assert len(train_dataset) == 4
#
# # no data_root
# train_dataset = CustomDataset(
# train_pipeline,
# img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
# ann_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts'),
# img_suffix='img.jpg',
# seg_map_suffix='gt.png')
# assert len(train_dataset) == 5
#
# # with data_root but img_dir/ann_dir are abs path
# train_dataset = CustomDataset(
# train_pipeline,
# data_root=osp.join(osp.dirname(__file__), '../data/pseudo_dataset'),
# img_dir=osp.abspath(
# osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs')),
# ann_dir=osp.abspath(
# osp.join(osp.dirname(__file__), '../data/pseudo_dataset/gts')),
# img_suffix='img.jpg',
# seg_map_suffix='gt.png')
# assert len(train_dataset) == 5
#
# # test_mode=True
# test_dataset = CustomDataset(
# test_pipeline,
# img_dir=osp.join(osp.dirname(__file__), '../data/pseudo_dataset/imgs'),
# img_suffix='img.jpg',
# test_mode=True)
# assert len(test_dataset) == 5
#
# # training data get
# train_data = train_dataset[0]
# assert isinstance(train_data, dict)
#
# # test data get
# test_data = test_dataset[0]
# assert isinstance(test_data, dict)
#
# # get gt seg map
# gt_seg_maps = train_dataset.get_gt_seg_maps()
# assert len(gt_seg_maps) == 5
# dataset settings
data_root = '/media/Program/CV/Project/SKMT/mmsegmentation/data/VOCdevkit/Seg/skmt5'
# data_root = '/media/Program/CV/Project/SKMT/mmsegmentation/data/VOCdevkit/VOC2012'
# data_root = '/media/Program/CV/Project/SKMT/mmsegmentation/data/VOCdevkit/US_dataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations'),
dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='PhotoMetricDistortion'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
# with img_dir and ann_dir
train_dataset = USDataset(
split='ImageSets/Segmentation/train.txt',
data_root=data_root,
img_dir='JPEGImages',
ann_dir='SegmentationClass',
pipeline=train_pipeline
)
# get gt seg map
gt_seg_maps = train_dataset.get_gt_seg_maps()
# evaluation
pseudo_results = []
for gt_seg_map in gt_seg_maps:
h, w = gt_seg_map.shape
pseudo_results.append(np.random.randint(low=0, high=16, size=(h, w)))
eval_results = train_dataset.evaluate(pseudo_results)
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
# evaluation with CLASSES
train_dataset.CLASSES = tuple(['a'] * 16)
eval_results = train_dataset.evaluate(pseudo_results)
assert isinstance(eval_results, dict)
assert 'mIoU' in eval_results
assert 'mAcc' in eval_results
assert 'aAcc' in eval_results
test_custom_dataset() | [
"[email protected]"
] | |
3d513b4d49ec184a8c212f0e9e39bded5560e491 | a9e60d0e5b3b5062a81da96be2d9c748a96ffca7 | /configurations/i21-config/scripts/scannable/waveform_channel/BinpointWaveformChannelController.py | 306459920ad78dce613a3566b8f00496e66b5507 | [] | no_license | openGDA/gda-diamond | 3736718596f47607335ada470d06148d7b57526e | bbb64dcfd581c30eddb210c647db5b5864b59166 | refs/heads/master | 2023-08-16T08:01:11.075927 | 2023-08-15T16:01:52 | 2023-08-15T16:01:52 | 121,757,699 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,892 | py | """
define a Binpoint class to control data collection during continuous move.
Note that the Binpoint device is slaved from the ADC_ACQ_GRP, therefore there is no concept of exposure time.
However collection time is required for data pulling stream timing in order to retrieve collected data in
a more or less synchronised fashion between different channels.
@author: Fajin Yuan
@organization: Diamond Light Source Ltd
@since: 25 August 2020
"""
from gda.epics import CAClient
from scannable.waveform_channel.WaveformChannelPollingInputStream import WaveformChannelPollingInputStream
from org.slf4j import LoggerFactory
import installation
TIMEOUT = 5
class BinpointWaveformChannelController(object):
def __init__(self, name, binpoint_root_pv):
self.logger = LoggerFactory.getLogger("BinpointWaveformChannelController:%s" % name)
self.verbose = False
self.name = name
#ADC_ACQ_GRP in EPICS doing the Binpoint reset comes after PGME waveform reset
self.pv_reset = CAClient(binpoint_root_pv + 'BPTS:BINPOINTALL:RESET.PROC')
self.binpoint_root_pv = binpoint_root_pv
self.configure()
self.exposure_time = 1
self.number_of_positions = 0
self.started = False
self.hardware_trigger_provider=None
self.stream=None
def set_hardware_trigger_provider(self, hardwareTriggerProvider):
self.hardware_trigger_provider=hardwareTriggerProvider
def get_hardware_trigger_provider(self):
return self.hardware_trigger_provider
def configure(self):
if self.verbose: self.logger.info("%s %s" % (self.name,'configure()...'))
if installation.isLive():
self.pv_reset.configure()
def erase(self):
if self.verbose: self.logger.info("%s %s" % (self.name,'erase()...'))
self.started = False
if self.verbose: self.logger.info("%s %s" % (self.name,'...erase()'))
def erase_and_start(self):
if self.verbose: self.logger.info("%s %s" % (self.name,'erase_and_start()...'))
if installation.isLive():
self.pv_reset.caput(1)
self.started = True
if self.verbose: self.logger.info("%s %s" % (self.name,'...erase_and_start()'))
def stop(self):
if self.verbose: self.logger.info("%s %s" % (self.name,'stop()...'))
# Binpoint has no stop, since it is slaved from the ADC.
if self.stream:
self.stream.stop()
self.started = False # added after I10-145
if self.verbose: self.logger.info("%s %s" % (self.name,'...stop()'))
# Provide functions to configure WaveformChannelScannable
def getChannelInputStream(self, channel_pv_suffix):
# Channel suffix assumes trailing :
self.stream = WaveformChannelPollingInputStream(self, channel_pv_suffix)
# TODO: Investigate if the NLAST.B can be listened to, if so we can avoid using this polling class
self.stream.verbose = self.verbose
return self.stream
def getChannelInputStreamFormat(self):
return '%f'
# Provide functions to configure WaveformChannelPollingInputStream
def getChannelInputStreamType(self):
return float
def getChannelInputStreamCAClients(self, channel_pv_suffix):
if installation.isLive():
pv_waveform = CAClient(self.binpoint_root_pv + channel_pv_suffix + 'BINPOINT')
pv_count = CAClient(self.binpoint_root_pv + channel_pv_suffix + 'BINPOINT:NLAST.B')
else:
pv_waveform = []
pv_count = self.number_of_positions
return pv_waveform, pv_count
def getExposureTime(self):
return self.exposure_time
def getChannelInputStreamAcquiring(self):
#return true when continuous move started
return self.started and self.hardware_trigger_provider.continuousMovingStarted
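
# Minimal usage sketch (hedged): the PV root and channel suffix below are
# hypothetical placeholders, and the trigger provider normally comes from the
# beamline configuration rather than this module.
#
#     controller = BinpointWaveformChannelController('binpoint', 'BLXXI-EA-DEV-01:')
#     controller.set_hardware_trigger_provider(trigger_provider)
#     stream = controller.getChannelInputStream('CHAN1:')  # suffix assumes a trailing ':'
#     controller.erase_and_start()  # resets BPTS:BINPOINTALL before the continuous move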
| [
"[email protected]"
] | |
6c8c3176d6fab6f847718ff9bf0b86f79b2e7b9f | 1fe8d4133981e53e88abf633046060b56fae883e | /venv/lib/python3.8/site-packages/tensorflow/python/tpu/tpu_strategy_util.py | 7c6396205ab5e127f88ed702d0b0bdcaa2a13c21 | [] | no_license | Akira331/flask-cifar10 | 6c49db8485038731ce67d23f0972b9574746c7a7 | 283e7a2867c77d4b6aba7aea9013bf241d35d76c | refs/heads/master | 2023-06-14T16:35:06.384755 | 2021-07-05T14:09:15 | 2021-07-05T14:09:15 | 382,864,970 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:3ea6cc5b52461659dafea8bd5247a3e94f163ecadc1e3f2e4dc1b668a5ca730e
size 8774
| [
"[email protected]"
] | |
2b0895d4db4313398af8c77ebcb6a061bcb4237a | 73e07f0dc3d8b8625105c1528746c91e382567ed | /tests/__init__.py | 79c17586ae066ff7d284c0c88d19930680dae095 | [
"MIT"
] | permissive | econchick/attrs | d10114f0e838ef0b63aadf5055f3e4a482cd0850 | 6a1a740c46e3071296eaa7b64d0120913ddadade | refs/heads/master | 2021-01-21T00:22:29.237367 | 2015-02-07T09:06:08 | 2015-02-07T09:06:08 | 30,468,084 | 0 | 0 | null | 2015-02-07T19:54:49 | 2015-02-07T19:54:49 | null | UTF-8 | Python | false | false | 1,181 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from attr import Attribute
from attr._make import NOTHING, make_class
def simple_class(no_cmp=True, no_repr=True, no_hash=True):
"""
Return a new simple class.
"""
return make_class(
"C", ["a", "b"],
no_cmp=no_cmp, no_repr=no_repr, no_hash=no_hash, no_init=False,
)
def simple_attr(name, default=NOTHING, validator=None, no_repr=False,
no_cmp=False, no_hash=False, no_init=False):
"""
Return an attribute with a name and no other bells and whistles.
"""
return Attribute(
name=name, default=default, validator=validator, no_repr=no_repr,
no_cmp=no_cmp, no_hash=no_hash, no_init=no_init
)
class TestSimpleClass(object):
"""
Tests for the testing helper function `make_class`.
"""
def test_returns_class(self):
"""
Returns a class object.
"""
assert type is simple_class().__class__
    def test_returns_distinct_classes(self):
"""
Each call returns a completely new class.
"""
assert simple_class() is not simple_class()
| [
"[email protected]"
] | |
63abffd0d5f913554789ad7d511d77f209c117cc | 6aa36fee3f4fcc9ac8f5509e51ea6bd8fc05b39b | /virtualenv-flask/lib/python2.7/site-packages/cybox/objects/win_task_object.py | 6349e56f3ccb9792cf8b1ede28ff5f69ce019358 | [] | no_license | syn-ack-zack/msg-stix-parser | 8c46c4d897d579162f224360a077ac42f28ffe89 | 1edb7c3b6d60f76f24b91830a1ae7076d46ede14 | refs/heads/master | 2021-03-27T15:01:07.344754 | 2016-09-30T16:43:22 | 2016-09-30T16:43:22 | 69,684,161 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,857 | py | # Copyright (c) 2013, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
import cybox
import cybox.bindings.win_task_object as win_task_binding
from cybox.common import (Base64Binary, DateTime, Duration, HashList, Long,
ObjectProperties, String, UnsignedLong)
from cybox.objects.email_message_object import EmailMessage
class Trigger(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.TriggerType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
trigger_begin = cybox.TypedField("Trigger_Begin", DateTime)
trigger_delay = cybox.TypedField("Trigger_Delay", Duration)
trigger_end = cybox.TypedField("Trigger_End", DateTime)
trigger_frequency = cybox.TypedField("Trigger_Frequency", String)
trigger_max_run_time = cybox.TypedField("Trigger_Max_Run_Time", Duration)
trigger_session_change_type = cybox.TypedField(
"Trigger_Session_Change_Type", String)
#TODO: add Trigger_Type (see CybOXProject/schemas issue #76)
class TriggerList(cybox.EntityList):
_binding = win_task_binding
_binding_class = win_task_binding.TriggerListType
_binding_var = 'Trigger'
_contained_type = Trigger
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
class IComHandlerAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IComHandlerActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
com_data = cybox.TypedField("COM_Data", String)
com_class_id = cybox.TypedField("COM_Class_ID", String)
class IExecAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IExecActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
exec_arguments = cybox.TypedField("Exec_Arguments", String)
exec_program_path = cybox.TypedField("Exec_Program_Path", String)
exec_working_directory = cybox.TypedField("Exec_Working_Directory", String)
exec_program_hashes = cybox.TypedField("Exec_Program_Hashes", HashList)
class IShowMessageAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.IShowMessageActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
show_message_body = cybox.TypedField("Show_Message_Body", String)
show_message_title = cybox.TypedField("Show_Message_Title", String)
class TaskAction(cybox.Entity):
_binding = win_task_binding
_binding_class = win_task_binding.TaskActionType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
action_type = cybox.TypedField("Action_Type", String)
action_id = cybox.TypedField("Action_ID", String)
iemailaction = cybox.TypedField("IEmailAction", EmailMessage)
icomhandleraction = cybox.TypedField("IComHandlerAction",
IComHandlerAction)
iexecaction = cybox.TypedField("IExecAction", IExecAction)
ishowmessageaction = cybox.TypedField("IShowMessageAction",
IShowMessageAction)
class TaskActionList(cybox.EntityList):
_binding = win_task_binding
_binding_class = win_task_binding.TaskActionListType
_binding_var = 'Action'
_contained_type = TaskAction
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
class WinTask(ObjectProperties):
_binding = win_task_binding
_binding_class = win_task_binding.WindowsTaskObjectType
_namespace = 'http://cybox.mitre.org/objects#WinTaskObject-2'
_XSI_NS = "WinTaskObj"
_XSI_TYPE = "WindowsTaskObjectType"
status = cybox.TypedField("Status", String)
priority = cybox.TypedField("Priority", String)
name = cybox.TypedField("Name", String)
application_name = cybox.TypedField("Application_Name", String)
parameters = cybox.TypedField("Parameters", String)
flags = cybox.TypedField("Flags", String)
account_name = cybox.TypedField("Account_Name", String)
account_run_level = cybox.TypedField("Account_Run_Level", String)
account_logon_type = cybox.TypedField("Account_Logon_Type", String)
creator = cybox.TypedField("Creator", String)
creation_date = cybox.TypedField("Creation_Date", DateTime)
most_recent_run_time = cybox.TypedField("Most_Recent_Run_Time", DateTime)
exit_code = cybox.TypedField("Exit_Code", Long)
max_run_time = cybox.TypedField("Max_Run_Time", UnsignedLong)
next_run_time = cybox.TypedField("Next_Run_Time", DateTime)
action_list = cybox.TypedField("Action_List", TaskActionList)
trigger_list = cybox.TypedField("Trigger_List", TriggerList)
comment = cybox.TypedField("Comment", String)
working_directory = cybox.TypedField("Working_Directory", String)
work_item_data = cybox.TypedField("Work_Item_Data", Base64Binary)
| [
"[email protected]"
] | |
6a89ebca4f0ef920b63c07807d9ea8970a5dca97 | 7ef2308e51d1d5700fbd092177ee15e2a03ebdd8 | /DisasterCrawler/ZHNewsCrawlerPostgreSql/gooseker/gooseeker.py | 95f8c3f6537f0b93bfdd207b0375084375a77bfa | [] | no_license | STAWZW/STAWZW1.0 | 741002eb35c2883e5739fee8d14ff430e9622c01 | a835ac27aba17f968116e321bd201b26c9fb3578 | refs/heads/master | 2020-07-21T20:21:59.753992 | 2019-09-26T09:21:28 | 2019-09-26T09:21:28 | 206,965,347 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# 模块名: gooseeker
# 类名: gsExtractor
# Version: 2.0
# 说明: html内容提取器
# 功能: 使用xslt作为模板,快速提取HTML DOM中的内容。
# released by 集搜客(http://www.gooseeker.com) on May 18, 2016
# github: https://github.com/FullerHua/jisou/core/gooseeker.py
from urllib import request
from urllib.parse import quote
from lxml import etree
import time
class GsExtractor(object):
def _init_(self):
self.xslt = ""
# 从文件读取xslt
def setXsltFromFile(self , xsltFilePath):
file = open(xsltFilePath , 'r' , encoding='UTF-8')
try:
self.xslt = file.read()
finally:
file.close()
# 从字符串获得xslt
def setXsltFromMem(self , xsltStr):
self.xslt = xsltStr
# 通过GooSeeker API接口获得xslt
def setXsltFromAPI(self , APIKey , theme, middle=None, bname=None):
apiurl = "http://www.gooseeker.com/api/getextractor?key="+ APIKey +"&theme="+quote(theme)
if (middle):
apiurl = apiurl + "&middle="+quote(middle)
if (bname):
apiurl = apiurl + "&bname="+quote(bname)
apiconn = request.urlopen(apiurl)
self.xslt = apiconn.read()
print(apiurl)
    # Return the current XSLT
def getXslt(self):
return self.xslt
    # Extraction method: takes an HTML DOM object and returns the extraction result
def extract(self , html):
xslt_root = etree.XML(self.xslt)
transform = etree.XSLT(xslt_root)
result_tree = transform(html)
return result_tree | [
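
# A minimal usage sketch; the file names below are hypothetical placeholders.
if __name__ == '__main__':
    extractor = GsExtractor()
    extractor.setXsltFromFile('extractor.xsl')          # hypothetical XSLT template
    doc = etree.parse('page.html', etree.HTMLParser())  # hypothetical saved HTML page
    print(extractor.extract(doc))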
"[email protected]"
] | |
b7278ef00ee2684f7f141252dd31fd099d9161ac | a6894d17fdbceb56d4364f0e279d03b16a181396 | /working-env/lib/python2.5/TurboGears-1.0.2.2-py2.5.egg/turbogears/i18n/data/el_GR.py | 195ec513132505f69da61123e521240105357ad6 | [] | no_license | thraxil/gtreed | c1c5a19178c1f50ff5e61887b13ff7b004da1d25 | ca228848364edb204b15a7411fd6192379781c78 | refs/heads/master | 2020-04-18T03:02:15.468044 | 2008-12-10T20:02:12 | 2008-12-10T20:02:12 | 88,388 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,155 | py | # Formatting configuration for locale el_GR
languages={'el': u'\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac', 'en': u'\u0391\u03b3\u03b3\u03bb\u03b9\u03ba\u03ac', 'zh': u'\u039a\u03b9\u03bd\u03b5\u03b6\u03b9\u03ba\u03ac', 'ca': u'\u039a\u03b1\u03c4\u03b1\u03bb\u03b1\u03bd\u03b9\u03ba\u03ac', 'it': u'\u0399\u03c4\u03b1\u03bb\u03b9\u03ba\u03ac', 'ar': u'\u0391\u03c1\u03b1\u03b2\u03b9\u03ba\u03ac', 'cs': u'\u03a4\u03c3\u03b5\u03c7\u03b9\u03ba\u03ac', 'et': u'\u0395\u03c3\u03b8\u03bf\u03bd\u03b9\u03ba\u03ac', 'es': u'\u0399\u03c3\u03c0\u03b1\u03bd\u03b9\u03ba\u03ac', 'ru': u'\u03a1\u03c9\u03c3\u03b9\u03ba\u03ac', 'nl': u'\u039f\u03bb\u03bb\u03b1\u03bd\u03b4\u03b9\u03ba\u03ac', 'pt': u'\u03a0\u03bf\u03c1\u03c4\u03bf\u03b3\u03b1\u03bb\u03b9\u03ba\u03ac', 'no': u'\u039d\u03bf\u03c1\u03b2\u03b7\u03b3\u03b9\u03ba\u03ac', 'tr': u'\u03a4\u03bf\u03c5\u03c1\u03ba\u03b9\u03ba\u03ac', 'lv': u'\u039b\u03b5\u03c4\u03bf\u03bd\u03b9\u03ba\u03ac', 'lt': u'\u039b\u03b9\u03b8\u03bf\u03c5\u03b1\u03bd\u03b9\u03ba\u03ac', 'ro': u'\u03a1\u03bf\u03c5\u03bc\u03b1\u03bd\u03b9\u03ba\u03ac', 'pl': u'\u03a0\u03bf\u03bb\u03c9\u03bd\u03b9\u03ba\u03ac', 'fr': u'\u0393\u03b1\u03bb\u03bb\u03b9\u03ba\u03ac', 'bg': u'\u0392\u03bf\u03c5\u03bb\u03b3\u03b1\u03c1\u03b9\u03ba\u03ac', 'hr': u'\u039a\u03c1\u03bf\u03b1\u03c4\u03b9\u03ba\u03ac', 'de': u'\u0393\u03b5\u03c1\u03bc\u03b1\u03bd\u03b9\u03ba\u03ac', 'da': u'\u0394\u03b1\u03bd\u03b9\u03ba\u03ac', 'fi': u'\u03a6\u03b9\u03bd\u03bb\u03b1\u03bd\u03b4\u03b9\u03ba\u03ac', 'hu': u'\u039f\u03c5\u03b3\u03b3\u03c1\u03b9\u03ba\u03ac', 'ja': u'\u0399\u03b1\u03c0\u03c9\u03bd\u03b9\u03ba\u03ac', 'he': u'\u0395\u03b2\u03c1\u03b1\u03ca\u03ba\u03ac', 'sr': u'\u03a3\u03b5\u03c1\u03b2\u03b9\u03ba\u03ac', 'sq': u'\u0391\u03bb\u03b2\u03b1\u03bd\u03b9\u03ba\u03ac', 'ko': u'\u039a\u03bf\u03c1\u03b5\u03b1\u03c4\u03b9\u03ba\u03ac', 'sv': u'\u03a3\u03bf\u03c5\u03b7\u03b4\u03b9\u03ba\u03ac', 'mk': u'\u03a3\u03bb\u03b1\u03b2\u03bf\u03bc\u03b1\u03ba\u03b5\u03b4\u03bf\u03bd\u03b9\u03ba\u03ac', 'sk': u'\u03a3\u03bb\u03bf\u03b2\u03b1\u03ba\u03b9\u03ba\u03ac', 'sl': u'\u03a3\u03bb\u03bf\u03b2\u03b5\u03bd\u03b9\u03ba\u03ac'}
countries={'BD': u'\u039c\u03c0\u03b1\u03bd\u03b3\u03ba\u03bb\u03b1\u03bd\u03c4\u03ad\u03c2', 'BE': u'\u0392\u03ad\u03bb\u03b3\u03b9\u03bf', 'BF': u'\u039c\u03c0\u03bf\u03c5\u03c1\u03ba\u03af\u03bd\u03b1 \u03a6\u03ac\u03c3\u03bf', 'BG': u'\u0392\u03bf\u03c5\u03bb\u03b3\u03b1\u03c1\u03af\u03b1', 'BA': u'\u0392\u03bf\u03c3\u03bd\u03af\u03b1 - \u0395\u03c1\u03b6\u03b5\u03b3\u03bf\u03b2\u03af\u03bd\u03b7', 'BB': u'\u039c\u03c0\u03b1\u03c1\u03bc\u03c0\u03ac\u03bd\u03c4\u03bf\u03c2', 'WF': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u039f\u03c5\u03b1\u03bb\u03bb\u03af\u03c2 \u03ba\u03b1\u03b9 \u03a6\u03bf\u03c5\u03c4\u03bf\u03c5\u03bd\u03ac', 'BM': u'\u0392\u03b5\u03c1\u03bc\u03bf\u03cd\u03b4\u03b5\u03c2', 'BN': u'\u039c\u03c0\u03c1\u03bf\u03c5\u03bd\u03ad\u03b9 \u039d\u03c4\u03b1\u03c1\u03bf\u03c5\u03c3\u03b1\u03bb\u03ac\u03bc', 'BO': u'\u0392\u03bf\u03bb\u03b9\u03b2\u03af\u03b1', 'BH': u'\u039c\u03c0\u03b1\u03c7\u03c1\u03ad\u03b9\u03bd', 'BI': u'\u039c\u03c0\u03bf\u03c5\u03c1\u03bf\u03cd\u03bd\u03c4\u03b9', 'BJ': u'\u039c\u03c0\u03ad\u03bd\u03b9\u03bd', 'BT': u'\u039c\u03c0\u03bf\u03c5\u03c4\u03ac\u03bd', 'JM': u'\u03a4\u03b6\u03b1\u03bc\u03ac\u03b9\u03ba\u03b1', 'BV': u'\u039d\u03ae\u03c3\u03bf\u03c2 \u039c\u03c0\u03bf\u03c5\u03b2\u03ad', 'BW': u'\u039c\u03c0\u03bf\u03c4\u03c3\u03bf\u03c5\u03ac\u03bd\u03b1', 'WS': u'\u03a3\u03b1\u03bc\u03cc\u03b1', 'BR': u'\u0392\u03c1\u03b1\u03b6\u03b9\u03bb\u03af\u03b1', 'BS': u'\u039c\u03c0\u03b1\u03c7\u03ac\u03bc\u03b5\u03c2', 'BY': u'\u039b\u03b5\u03c5\u03ba\u03bf\u03c1\u03c9\u03c3\u03af\u03b1', 'BZ': u'\u039c\u03c0\u03b5\u03bb\u03af\u03b6', 'RU': u'\u03a1\u03c9\u03c3\u03af\u03b1', 'RW': u'\u03a1\u03bf\u03c5\u03ac\u03bd\u03c4\u03b1', 'TL': u'\u0391\u03bd\u03b1\u03c4\u03bf\u03bb\u03b9\u03ba\u03cc \u03a4\u03b9\u03bc\u03cc\u03c1', 'RE': u'\u03a1\u03b5\u03cb\u03bd\u03b9\u03cc\u03bd', 'TM': u'\u03a4\u03bf\u03c5\u03c1\u03ba\u03bc\u03b5\u03bd\u03b9\u03c3\u03c4\u03ac\u03bd', 'TJ': u'\u03a4\u03b1\u03c4\u03b6\u03b9\u03ba\u03b9\u03c3\u03c4\u03ac\u03bd', 'RO': u'\u03a1\u03bf\u03c5\u03bc\u03b1\u03bd\u03af\u03b1', 'TK': u'\u03a4\u03bf\u03ba\u03b5\u03bb\u03ac\u03bf\u03c5', 'GW': u'\u0393\u03bf\u03c5\u03b9\u03bd\u03ad\u03b1-\u039c\u03c0\u03b9\u03c3\u03ac\u03bf\u03c5', 'GU': u'\u0393\u03ba\u03bf\u03c5\u03ac\u03bc', 'GT': u'\u0393\u03bf\u03c5\u03b1\u03c4\u03b5\u03bc\u03ac\u03bb\u03b1', 'GS': u'\u039d\u03cc\u03c4\u03b9\u03b1 \u0393\u03b5\u03c9\u03c1\u03b3\u03af\u03b1 \u03ba\u03b1\u03b9 \u039d\u03ae\u03c3\u03bf\u03b9 \u039d\u03cc\u03c4\u03b9\u03b5\u03c2 \u03a3\u03ac\u03bd\u03c4\u03bf\u03c5\u03b9\u03c4\u03c2', 'GR': u'\u0395\u03bb\u03bb\u03ac\u03b4\u03b1', 'GQ': u'\u0399\u03c3\u03b7\u03bc\u03b5\u03c1\u03b9\u03bd\u03ae \u0393\u03bf\u03c5\u03b9\u03bd\u03ad\u03b1', 'GP': u'\u0393\u03bf\u03c5\u03b1\u03b4\u03b5\u03bb\u03bf\u03cd\u03c0\u03b7', 'JP': u'\u0399\u03b1\u03c0\u03c9\u03bd\u03af\u03b1', 'GY': u'\u0393\u03bf\u03c5\u03b9\u03ac\u03bd\u03b1', 'GF': u'\u0393\u03b1\u03bb\u03bb\u03b9\u03ba\u03ae \u0393\u03bf\u03c5\u03b9\u03ac\u03bd\u03b1', 'GE': u'\u0393\u03b5\u03c9\u03c1\u03b3\u03af\u03b1', 'GD': u'\u0393\u03c1\u03b5\u03bd\u03ac\u03b4\u03b1', 'GB': u'\u0397\u03bd\u03c9\u03bc\u03ad\u03bd\u03bf \u0392\u03b1\u03c3\u03af\u03bb\u03b5\u03b9\u03bf', 'GA': u'\u0393\u03ba\u03b1\u03bc\u03c0\u03cc\u03bd', 'SV': u'\u0395\u03bb \u03a3\u03b1\u03bb\u03b2\u03b1\u03b4\u03cc\u03c1', 'GN': u'\u0393\u03bf\u03c5\u03b9\u03bd\u03ad\u03b1', 'GM': u'\u0393\u03ba\u03ac\u03bc\u03c0\u03b9\u03b1', 'GL': u'\u0393\u03c1\u03bf\u03b9\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'GI': 
u'\u0393\u03b9\u03b2\u03c1\u03b1\u03bb\u03c4\u03ac\u03c1', 'GH': u'\u0393\u03ba\u03ac\u03bd\u03b1', 'OM': u'\u039f\u03bc\u03ac\u03bd', 'TN': u'\u03a4\u03c5\u03bd\u03b7\u03c3\u03af\u03b1', 'JO': u'\u0399\u03bf\u03c1\u03b4\u03b1\u03bd\u03af\u03b1', 'SP': u'\u03a3\u03b5\u03c1\u03b2\u03af\u03b1', 'HR': u'\u039a\u03c1\u03bf\u03b1\u03c4\u03af\u03b1', 'HT': u'\u0391\u03ca\u03c4\u03ae', 'HU': u'\u039f\u03c5\u03b3\u03b3\u03b1\u03c1\u03af\u03b1', 'HK': u'\u03a7\u03bf\u03bd\u03b3\u03ba \u039a\u03bf\u03bd\u03b3\u03ba, \u0395\u03b9\u03b4\u03b9\u03ba\u03ae \u0394\u03b9\u03bf\u03b9\u03ba\u03b7\u03c4\u03b9\u03ba\u03ae \u03a0\u03b5\u03c1\u03b9\u03c6\u03ad\u03c1\u03b5\u03b9\u03b1 \u03c4\u03b7\u03c2 \u039a\u03af\u03bd\u03b1\u03c2', 'HN': u'\u039f\u03bd\u03b4\u03bf\u03cd\u03c1\u03b1', 'HM': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a7\u03b5\u03c1\u03bd\u03c4 \u03ba\u03b1\u03b9 \u039c\u03b1\u03ba\u03bd\u03c4\u03cc\u03bd\u03b1\u03bb\u03bd\u03c4', 'VE': u'\u0392\u03b5\u03bd\u03b5\u03b6\u03bf\u03c5\u03ad\u03bb\u03b1', 'PR': u'\u03a0\u03bf\u03c5\u03ad\u03c1\u03c4\u03bf \u03a1\u03af\u03ba\u03bf', 'PS': u'\u03a0\u03b1\u03bb\u03b1\u03b9\u03c3\u03c4\u03b9\u03bd\u03b9\u03b1\u03ba\u03ac \u0395\u03b4\u03ac\u03c6\u03b7', 'PW': u'\u03a0\u03b1\u03bb\u03ac\u03bf\u03c5', 'PT': u'\u03a0\u03bf\u03c1\u03c4\u03bf\u03b3\u03b1\u03bb\u03af\u03b1', 'SJ': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a3\u03b2\u03ac\u03bb\u03bc\u03c0\u03b1\u03c1 \u03ba\u03b1\u03b9 \u0393\u03b9\u03b1\u03bd \u039c\u03b1\u03b3\u03b9\u03ad\u03bd', 'PY': u'\u03a0\u03b1\u03c1\u03b1\u03b3\u03bf\u03c5\u03ac\u03b7', 'IQ': u'\u0399\u03c1\u03ac\u03ba', 'PA': u'\u03a0\u03b1\u03bd\u03b1\u03bc\u03ac\u03c2', 'PF': u'\u0393\u03b1\u03bb\u03bb\u03b9\u03ba\u03ae \u03a0\u03bf\u03bb\u03c5\u03bd\u03b7\u03c3\u03af\u03b1', 'PG': u'\u03a0\u03b1\u03c0\u03bf\u03cd\u03b1 - \u039d\u03ad\u03b1 \u0393\u03bf\u03c5\u03b9\u03bd\u03ad\u03b1', 'PE': u'\u03a0\u03b5\u03c1\u03bf\u03cd', 'PK': u'\u03a0\u03b1\u03ba\u03b9\u03c3\u03c4\u03ac\u03bd', 'PH': u'\u03a6\u03b9\u03bb\u03b9\u03c0\u03c0\u03af\u03bd\u03b5\u03c2', 'PN': u'\u03a0\u03af\u03c4\u03ba\u03b5\u03c1\u03bd', 'PL': u'\u03a0\u03bf\u03bb\u03c9\u03bd\u03af\u03b1', 'PM': u'\u03a3\u03b1\u03b9\u03bd\u03c4 \u03a0\u03b9\u03ad\u03c1 \u03ba\u03b1\u03b9 \u039c\u03b9\u03ba\u03b5\u03bb\u03cc\u03bd', 'ZM': u'\u0396\u03ac\u03bc\u03c0\u03b9\u03b1', 'EH': u'\u0394\u03c5\u03c4\u03b9\u03ba\u03ae \u03a3\u03b1\u03c7\u03ac\u03c1\u03b1', 'EE': u'\u0395\u03c3\u03b8\u03bf\u03bd\u03af\u03b1', 'EG': u'\u0391\u03af\u03b3\u03c5\u03c0\u03c4\u03bf\u03c2', 'ZA': u'\u039d\u03cc\u03c4\u03b9\u03b1 \u0391\u03c6\u03c1\u03b9\u03ba\u03ae', 'EC': u'\u0399\u03c3\u03b7\u03bc\u03b5\u03c1\u03b9\u03bd\u03cc\u03c2', 'IT': u'\u0399\u03c4\u03b1\u03bb\u03af\u03b1', 'VN': u'\u0392\u03b9\u03b5\u03c4\u03bd\u03ac\u03bc', 'SB': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a3\u03bf\u03bb\u03bf\u03bc\u03ce\u03bd\u03c4\u03bf\u03c2', 'ET': u'\u0391\u03b9\u03b8\u03b9\u03bf\u03c0\u03af\u03b1', 'SO': u'\u03a3\u03bf\u03bc\u03b1\u03bb\u03af\u03b1', 'ZW': u'\u0396\u03b9\u03bc\u03c0\u03ac\u03bc\u03c0\u03bf\u03c5\u03b5', 'SA': u'\u03a3\u03b1\u03bf\u03c5\u03b4\u03b9\u03ba\u03ae \u0391\u03c1\u03b1\u03b2\u03af\u03b1', 'ES': u'\u0399\u03c3\u03c0\u03b1\u03bd\u03af\u03b1', 'ER': u'\u0395\u03c1\u03c5\u03b8\u03c1\u03b1\u03af\u03b1', 'MD': u'\u039c\u03bf\u03bb\u03b4\u03b1\u03b2\u03af\u03b1, \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03b7\u03c2', 'MG': u'\u039c\u03b1\u03b4\u03b1\u03b3\u03b1\u03c3\u03ba\u03ac\u03c1\u03b7', 'MA': u'\u039c\u03b1\u03c1\u03cc\u03ba\u03bf', 'MC': 
u'\u039c\u03bf\u03bd\u03b1\u03ba\u03cc', 'UZ': u'\u039f\u03c5\u03b6\u03bc\u03c0\u03b5\u03ba\u03b9\u03c3\u03c4\u03ac\u03bd', 'MM': u'\u039c\u03b9\u03b1\u03bd\u03bc\u03ac\u03c1', 'ML': u'\u039c\u03ac\u03bb\u03b9', 'MO': u'\u039c\u03b1\u03ba\u03ac\u03bf, \u0395\u03b9\u03b4\u03b9\u03ba\u03ae \u0394\u03b9\u03bf\u03b9\u03ba\u03b7\u03c4\u03b9\u03ba\u03ae \u03a0\u03b5\u03c1\u03b9\u03c6\u03ad\u03c1\u03b5\u03b9\u03b1 \u03c4\u03b7\u03c2 \u039a\u03af\u03bd\u03b1\u03c2', 'MN': u'\u039c\u03bf\u03b3\u03b3\u03bf\u03bb\u03af\u03b1', 'MH': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u039c\u03ac\u03c1\u03c3\u03b1\u03bb', 'MK': u'\u03a0\u0393\u0394 \u039c\u03b1\u03ba\u03b5\u03b4\u03bf\u03bd\u03af\u03b1\u03c2', 'MU': u'\u039c\u03b1\u03c5\u03c1\u03af\u03ba\u03b9\u03bf\u03c2', 'MT': u'\u039c\u03ac\u03bb\u03c4\u03b1', 'MW': u'\u039c\u03b1\u03bb\u03ac\u03bf\u03c5\u03b9', 'MV': u'\u039c\u03b1\u03bb\u03b4\u03af\u03b2\u03b5\u03c2', 'MQ': u'\u039c\u03b1\u03c1\u03c4\u03b9\u03bd\u03af\u03ba\u03b1', 'MP': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u0392\u03cc\u03c1\u03b5\u03b9\u03b5\u03c2 \u039c\u03b1\u03c1\u03b9\u03ac\u03bd\u03b5\u03c2', 'MS': u'\u039c\u03bf\u03bd\u03c3\u03b5\u03c1\u03ac\u03c4', 'MR': u'\u039c\u03b1\u03c5\u03c1\u03b9\u03c4\u03b1\u03bd\u03af\u03b1', 'UG': u'\u039f\u03c5\u03b3\u03ba\u03ac\u03bd\u03c4\u03b1', 'MY': u'\u039c\u03b1\u03bb\u03b1\u03b9\u03c3\u03af\u03b1', 'MX': u'\u039c\u03b5\u03be\u03b9\u03ba\u03cc', 'IL': u'\u0399\u03c3\u03c1\u03b1\u03ae\u03bb', 'FR': u'\u0393\u03b1\u03bb\u03bb\u03af\u03b1', 'IO': u'\u0392\u03c1\u03b5\u03c4\u03b1\u03bd\u03b9\u03ba\u03ac \u0388\u03b4\u03ac\u03c6\u03b7 \u0399\u03bd\u03b4\u03b9\u03ba\u03bf\u03cd \u03a9\u03ba\u03b5\u03b1\u03bd\u03bf\u03cd', 'SH': u'\u0391\u03b3\u03af\u03b1 \u0395\u03bb\u03ad\u03bd\u03b7', 'FI': u'\u03a6\u03b9\u03bd\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'FJ': u'\u03a6\u03af\u03c4\u03b6\u03b9', 'FK': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a6\u03ce\u03ba\u03bb\u03b1\u03bd\u03c4', 'FM': u'\u039c\u03b9\u03ba\u03c1\u03bf\u03bd\u03b7\u03c3\u03af\u03b1, \u039f\u03bc\u03cc\u03c3\u03c0\u03bf\u03bd\u03b4\u03b5\u03c2 \u03a0\u03bf\u03bb\u03b9\u03c4\u03b5\u03af\u03b5\u03c2 \u03c4\u03b7\u03c2', 'FO': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a6\u03b5\u03c1\u03cc\u03b5\u03c2', 'NI': u'\u039d\u03b9\u03ba\u03b1\u03c1\u03ac\u03b3\u03bf\u03c5\u03b1', 'NL': u'\u039f\u03bb\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'NO': u'\u039d\u03bf\u03c1\u03b2\u03b7\u03b3\u03af\u03b1', 'NA': u'\u039d\u03b1\u03bc\u03af\u03bc\u03c0\u03b9\u03b1', 'VU': u'\u0392\u03b1\u03bd\u03bf\u03c5\u03ac\u03c4\u03bf\u03c5', 'NC': u'\u039d\u03ad\u03b1 \u039a\u03b1\u03bb\u03b7\u03b4\u03bf\u03bd\u03af\u03b1', 'NE': u'\u039d\u03af\u03b3\u03b7\u03c1', 'NF': u'\u039d\u03ae\u03c3\u03bf\u03c2 \u039d\u03cc\u03c1\u03c6\u03bf\u03bb\u03ba', 'NG': u'\u039d\u03b9\u03b3\u03b7\u03c1\u03af\u03b1', 'NZ': u'\u039d\u03ad\u03b1 \u0396\u03b7\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'NP': u'\u039d\u03b5\u03c0\u03ac\u03bb', 'NR': u'\u039d\u03b1\u03bf\u03cd\u03c1\u03bf\u03c5', 'NU': u'\u039d\u03b9\u03bf\u03cd\u03b5', 'CK': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u039a\u03bf\u03c5\u03ba', 'CI': u'\u0391\u03ba\u03c4\u03ae \u0395\u03bb\u03b5\u03c6\u03b1\u03bd\u03c4\u03cc\u03b4\u03bf\u03bd\u03c4\u03bf\u03c2', 'CH': u'\u0395\u03bb\u03b2\u03b5\u03c4\u03af\u03b1', 'CO': u'\u039a\u03bf\u03bb\u03bf\u03bc\u03b2\u03af\u03b1', 'CN': u'\u039a\u03af\u03bd\u03b1', 'CM': u'\u039a\u03b1\u03bc\u03b5\u03c1\u03bf\u03cd\u03bd', 'CL': u'\u03a7\u03b9\u03bb\u03ae', 'CC': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u039a\u03cc\u03ba\u03bf\u03c2 (\u039a\u03ae\u03bb\u03b9\u03bd\u03b3\u03ba)', 
'CA': u'\u039a\u03b1\u03bd\u03b1\u03b4\u03ac\u03c2', 'CG': u'\u039a\u03bf\u03bd\u03b3\u03ba\u03cc', 'CF': u'\u039a\u03b5\u03bd\u03c4\u03c1\u03bf\u03b1\u03c6\u03c1\u03b9\u03ba\u03b1\u03bd\u03b9\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1', 'CD': u'\u039a\u03bf\u03bd\u03b3\u03ba\u03cc, \u039b\u03b1\u03ca\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03bf\u03c5', 'CZ': u'\u03a4\u03c3\u03b5\u03c7\u03af\u03b1', 'CY': u'\u039a\u03cd\u03c0\u03c1\u03bf\u03c2', 'CX': u'\u039d\u03ae\u03c3\u03bf\u03c2 \u03a7\u03c1\u03b9\u03c3\u03c4\u03bf\u03c5\u03b3\u03ad\u03bd\u03bd\u03c9\u03bd', 'CR': u'\u039a\u03cc\u03c3\u03c4\u03b1 \u03a1\u03af\u03ba\u03b1', 'CV': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a0\u03c1\u03ac\u03c3\u03b9\u03bd\u03bf\u03c5 \u0391\u03ba\u03c1\u03c9\u03c4\u03b7\u03c1\u03af\u03bf\u03c5', 'CU': u'\u039a\u03bf\u03cd\u03b2\u03b1', 'SZ': u'\u03a3\u03bf\u03c5\u03b1\u03b6\u03b9\u03bb\u03ac\u03bd\u03b4\u03b7', 'SY': u'\u03a3\u03c5\u03c1\u03af\u03b1, \u0391\u03c1\u03b1\u03b2\u03b9\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03b7\u03c2', 'KG': u'\u039a\u03b9\u03c1\u03b3\u03b9\u03b6\u03af\u03b1', 'KE': u'\u039a\u03ad\u03bd\u03c5\u03b1', 'SR': u'\u03a3\u03bf\u03c5\u03c1\u03b9\u03bd\u03ac\u03bc', 'KI': u'\u039a\u03b9\u03c1\u03b9\u03bc\u03c0\u03ac\u03c4\u03b9', 'KH': u'\u039a\u03b1\u03bc\u03c0\u03cc\u03c4\u03b6\u03b7', 'KN': u'\u03a3\u03b1\u03b9\u03bd\u03c4 \u039a\u03b9\u03c4\u03c2 \u03ba\u03b1\u03b9 \u039d\u03ad\u03b2\u03b9\u03c2', 'KM': u'\u039a\u03bf\u03bc\u03cc\u03c1\u03b5\u03c2', 'ST': u'\u03a3\u03ac\u03bf \u03a4\u03bf\u03bc\u03ad \u03ba\u03b1\u03b9 \u03a0\u03c1\u03af\u03bd\u03c3\u03b9\u03c0\u03b5', 'SK': u'\u03a3\u03bb\u03bf\u03b2\u03b1\u03ba\u03af\u03b1', 'KR': u'\u039a\u03bf\u03c1\u03ad\u03b1, \u039d\u03cc\u03c4\u03b9\u03b1', 'SI': u'\u03a3\u03bb\u03bf\u03b2\u03b5\u03bd\u03af\u03b1', 'KP': u'\u039a\u03bf\u03c1\u03ad\u03b1, \u0392\u03cc\u03c1\u03b5\u03b9\u03b1', 'KW': u'\u039a\u03bf\u03c5\u03b2\u03ad\u03b9\u03c4', 'SN': u'\u03a3\u03b5\u03bd\u03b5\u03b3\u03ac\u03bb\u03b7', 'SM': u'\u0386\u03b3\u03b9\u03bf\u03c2 \u039c\u03b1\u03c1\u03af\u03bd\u03bf\u03c2', 'SL': u'\u03a3\u03b9\u03ad\u03c1\u03b1 \u039b\u03b5\u03cc\u03bd\u03b5', 'SC': u'\u03a3\u03b5\u03cb\u03c7\u03ad\u03bb\u03bb\u03b5\u03c2', 'KZ': u'\u039a\u03b1\u03b6\u03b1\u03ba\u03c3\u03c4\u03ac\u03bd', 'KY': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u039a\u03ad\u03b9\u03bc\u03b1\u03bd', 'SG': u'\u03a3\u03b9\u03b3\u03ba\u03b1\u03c0\u03bf\u03cd\u03c1\u03b7', 'SE': u'\u03a3\u03bf\u03c5\u03b7\u03b4\u03af\u03b1', 'SD': u'\u03a3\u03bf\u03c5\u03b4\u03ac\u03bd', 'DO': u'\u0394\u03bf\u03bc\u03b9\u03bd\u03b9\u03ba\u03b1\u03bd\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1', 'DM': u'\u039d\u03c4\u03bf\u03bc\u03af\u03bd\u03b9\u03ba\u03b1', 'DJ': u'\u03a4\u03b6\u03b9\u03bc\u03c0\u03bf\u03c5\u03c4\u03af', 'DK': u'\u0394\u03b1\u03bd\u03af\u03b1', 'VG': u'\u0392\u03c1\u03b5\u03c4\u03b1\u03bd\u03b9\u03ba\u03ad\u03c2 \u03a0\u03b1\u03c1\u03b8\u03ad\u03bd\u03bf\u03b9 \u039d\u03ae\u03c3\u03bf\u03b9', 'DE': u'\u0393\u03b5\u03c1\u03bc\u03b1\u03bd\u03af\u03b1', 'YE': u'\u03a5\u03b5\u03bc\u03ad\u03bd\u03b7', 'DZ': u'\u0391\u03bb\u03b3\u03b5\u03c1\u03af\u03b1', 'US': u'\u0397\u03bd\u03c9\u03bc\u03ad\u03bd\u03b5\u03c2 \u03a0\u03bf\u03bb\u03b9\u03c4\u03b5\u03af\u03b5\u03c2', 'UY': u'\u039f\u03c5\u03c1\u03bf\u03c5\u03b3\u03bf\u03c5\u03ac\u03b7', 'YU': u'\u0393\u03b9\u03bf\u03c5\u03b3\u03ba\u03bf\u03c3\u03bb\u03b1\u03b2\u03af\u03b1', 'YT': 
u'\u039c\u03b1\u03b3\u03b9\u03cc\u03c4', 'UM': u'\u0391\u03c0\u03bf\u03bc\u03b1\u03ba\u03c1\u03c5\u03c3\u03bc\u03ad\u03bd\u03b5\u03c2 \u039d\u03b7\u03c3\u03af\u03b4\u03b5\u03c2 \u03c4\u03c9\u03bd \u0397\u03bd\u03c9\u03bc\u03ad\u03bd\u03c9\u03bd \u03a0\u03bf\u03bb\u03b9\u03c4\u03b5\u03b9\u03ce\u03bd', 'LB': u'\u039b\u03af\u03b2\u03b1\u03bd\u03bf\u03c2', 'LC': u'\u0391\u03b3\u03af\u03b1 \u039b\u03bf\u03c5\u03ba\u03af\u03b1', 'LA': u'\u039b\u03b1\u03c4\u03b9\u03bd\u03b9\u03ba\u03ae \u0391\u03bc\u03b5\u03c1\u03b9\u03ba\u03ae', 'TV': u'\u03a4\u03bf\u03c5\u03b2\u03b1\u03bb\u03bf\u03cd', 'TW': u'\u03a4\u03b1\u03ca\u03b2\u03ac\u03bd (\u0394.\u039a.)', 'TT': u'\u03a4\u03c1\u03b9\u03bd\u03b9\u03b4\u03ac\u03b4 \u03ba\u03b1\u03b9 \u03a4\u03bf\u03bc\u03c0\u03ac\u03b3\u03ba\u03bf', 'TR': u'\u03a4\u03bf\u03c5\u03c1\u03ba\u03af\u03b1', 'LK': u'\u03a3\u03c1\u03b9 \u039b\u03ac\u03bd\u03ba\u03b1', 'LI': u'\u039b\u03b9\u03c7\u03c4\u03b5\u03bd\u03c3\u03c4\u03ac\u03b9\u03bd', 'LV': u'\u039b\u03b5\u03c4\u03bf\u03bd\u03af\u03b1', 'TO': u'\u03a4\u03cc\u03bd\u03b3\u03ba\u03b1', 'LT': u'\u039b\u03b9\u03b8\u03bf\u03c5\u03b1\u03bd\u03af\u03b1', 'LU': u'\u039b\u03bf\u03c5\u03be\u03b5\u03bc\u03b2\u03bf\u03cd\u03c1\u03b3\u03bf', 'LR': u'\u039b\u03b9\u03b2\u03b5\u03c1\u03af\u03b1', 'LS': u'\u039b\u03b5\u03c3\u03cc\u03c4\u03bf', 'TH': u'\u03a4\u03b1\u03ca\u03bb\u03ac\u03bd\u03b4\u03b7', 'TF': u'\u0393\u03b1\u03bb\u03bb\u03b9\u03ba\u03ac \u039d\u03cc\u03c4\u03b9\u03b1 \u0395\u03b4\u03ac\u03c6\u03b7', 'TG': u'\u03a4\u03cc\u03b3\u03ba\u03bf', 'TD': u'\u03a4\u03c3\u03b1\u03bd\u03c4', 'TC': u'\u039d\u03ae\u03c3\u03bf\u03b9 \u03a4\u03b5\u03c1\u03ba\u03c2 \u03ba\u03b1\u03b9 \u039a\u03ac\u03b9\u03ba\u03bf\u03c2', 'VA': u'\u0391\u03b3\u03af\u03b1 \u0388\u03b4\u03c1\u03b1 (\u0392\u03b1\u03c4\u03b9\u03ba\u03b1\u03bd\u03cc)', 'VC': u'\u0386\u03b3\u03b9\u03bf\u03c2 \u0392\u03b9\u03ba\u03ad\u03bd\u03c4\u03b9\u03bf\u03c2 \u03ba\u03b1\u03b9 \u0393\u03c1\u03b5\u03bd\u03b1\u03b4\u03af\u03bd\u03b5\u03c2', 'AE': u'\u0397\u03bd\u03c9\u03bc\u03ad\u03bd\u03b1 \u0391\u03c1\u03b1\u03b2\u03b9\u03ba\u03ac \u0395\u03bc\u03b9\u03c1\u03ac\u03c4\u03b1', 'AD': u'\u0391\u03bd\u03b4\u03cc\u03c1\u03b1', 'AG': u'\u0391\u03bd\u03c4\u03af\u03b3\u03ba\u03bf\u03c5\u03b1 \u03ba\u03b1\u03b9 \u039c\u03c0\u03b1\u03c1\u03bc\u03c0\u03bf\u03cd\u03bd\u03c4\u03b1', 'AF': u'\u0391\u03c6\u03b3\u03b1\u03bd\u03b9\u03c3\u03c4\u03ac\u03bd', 'AI': u'\u0391\u03bd\u03b3\u03ba\u03bf\u03c5\u03af\u03bb\u03b1', 'VI': u'\u0391\u03bc\u03b5\u03c1\u03b9\u03ba\u03b1\u03bd\u03b9\u03ba\u03ad\u03c2 \u03a0\u03b1\u03c1\u03b8\u03ad\u03bd\u03bf\u03b9 \u039d\u03ae\u03c3\u03bf\u03b9', 'IS': u'\u0399\u03c3\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'IR': u'\u0399\u03c1\u03ac\u03bd, \u0399\u03c3\u03bb\u03b1\u03bc\u03b9\u03ba\u03ae \u0394\u03b7\u03bc\u03bf\u03ba\u03c1\u03b1\u03c4\u03af\u03b1 \u03c4\u03bf\u03c5', 'AM': u'\u0391\u03c1\u03bc\u03b5\u03bd\u03af\u03b1', 'AL': u'\u0391\u03bb\u03b2\u03b1\u03bd\u03af\u03b1', 'AO': u'\u0391\u03bd\u03b3\u03ba\u03cc\u03bb\u03b1', 'AN': u'\u039f\u03bb\u03bb\u03b1\u03bd\u03b4\u03b9\u03ba\u03ad\u03c2 \u0391\u03bd\u03c4\u03af\u03bb\u03bb\u03b5\u03c2', 'AQ': u'\u0391\u03bd\u03c4\u03b1\u03c1\u03ba\u03c4\u03b9\u03ba\u03ae', 'AS': u'\u0391\u03bc\u03b5\u03c1\u03b9\u03ba\u03b1\u03bd\u03b9\u03ba\u03ae \u03a3\u03b1\u03bc\u03cc\u03b1', 'AR': u'\u0391\u03c1\u03b3\u03b5\u03bd\u03c4\u03b9\u03bd\u03ae', 'AU': u'\u0391\u03c5\u03c3\u03c4\u03c1\u03b1\u03bb\u03af\u03b1', 'AT': u'\u0391\u03c5\u03c3\u03c4\u03c1\u03af\u03b1', 'AW': u'\u0391\u03c1\u03bf\u03cd\u03bc\u03c0\u03b1', 'IN': 
u'\u0399\u03bd\u03b4\u03af\u03b1', 'TZ': u'\u03a4\u03b1\u03bd\u03b6\u03b1\u03bd\u03af\u03b1', 'AZ': u'\u0391\u03b6\u03b5\u03c1\u03bc\u03c0\u03b1\u03ca\u03c4\u03b6\u03ac\u03bd', 'IE': u'\u0399\u03c1\u03bb\u03b1\u03bd\u03b4\u03af\u03b1', 'ID': u'\u0399\u03bd\u03b4\u03bf\u03bd\u03b7\u03c3\u03af\u03b1', 'UA': u'\u039f\u03c5\u03ba\u03c1\u03b1\u03bd\u03af\u03b1', 'QA': u'\u039a\u03b1\u03c4\u03ac\u03c1', 'MZ': u'\u039c\u03bf\u03b6\u03b1\u03bc\u03b2\u03af\u03ba\u03b7'}
months=[u'\u0399\u03b1\u03bd\u03bf\u03c5\u03ac\u03c1\u03b9\u03bf\u03c2', u'\u03a6\u03b5\u03b2\u03c1\u03bf\u03c5\u03ac\u03c1\u03b9\u03bf\u03c2', u'\u039c\u03ac\u03c1\u03c4\u03b9\u03bf\u03c2', u'\u0391\u03c0\u03c1\u03af\u03bb\u03b9\u03bf\u03c2', u'\u039c\u03ac\u03b9\u03bf\u03c2', u'\u0399\u03bf\u03cd\u03bd\u03b9\u03bf\u03c2', u'\u0399\u03bf\u03cd\u03bb\u03b9\u03bf\u03c2', u'\u0391\u03cd\u03b3\u03bf\u03c5\u03c3\u03c4\u03bf\u03c2', u'\u03a3\u03b5\u03c0\u03c4\u03ad\u03bc\u03b2\u03c1\u03b9\u03bf\u03c2', u'\u039f\u03ba\u03c4\u03ce\u03b2\u03c1\u03b9\u03bf\u03c2', u'\u039d\u03bf\u03ad\u03bc\u03b2\u03c1\u03b9\u03bf\u03c2', u'\u0394\u03b5\u03ba\u03ad\u03bc\u03b2\u03c1\u03b9\u03bf\u03c2']
abbrMonths=[u'\u0399\u03b1\u03bd', u'\u03a6\u03b5\u03b2', u'\u039c\u03b1\u03c1', u'\u0391\u03c0\u03c1', u'\u039c\u03b1\u03ca', u'\u0399\u03bf\u03c5\u03bd', u'\u0399\u03bf\u03c5\u03bb', u'\u0391\u03c5\u03b3', u'\u03a3\u03b5\u03c0', u'\u039f\u03ba\u03c4', u'\u039d\u03bf\u03b5', u'\u0394\u03b5\u03ba']
days=[u'\u0394\u03b5\u03c5\u03c4\u03ad\u03c1\u03b1', u'\u03a4\u03c1\u03af\u03c4\u03b7', u'\u03a4\u03b5\u03c4\u03ac\u03c1\u03c4\u03b7', u'\u03a0\u03ad\u03bc\u03c0\u03c4\u03b7', u'\u03a0\u03b1\u03c1\u03b1\u03c3\u03ba\u03b5\u03c5\u03ae', u'\u03a3\u03ac\u03b2\u03b2\u03b1\u03c4\u03bf', u'\u039a\u03c5\u03c1\u03b9\u03b1\u03ba\u03ae']
abbrDays=[u'\u0394\u03b5\u03c5', u'\u03a4\u03c1\u03b9', u'\u03a4\u03b5\u03c4', u'\u03a0\u03b5\u03bc', u'\u03a0\u03b1\u03c1', u'\u03a3\u03b1\u03b2', u'\u039a\u03c5\u03c1']
dateFormats={'medium': '%d %%(abbrmonthname)s %Y', 'full': '%%(dayname)s, %d %%(monthname)s %Y', 'long': '%d %%(monthname)s %Y', 'short': '%d/%m/%Y'}
numericSymbols={'group': '.', 'nativeZeroDigit': '0', 'exponential': 'E', 'perMille': u'\u2030', 'nan': u'\ufffd', 'decimal': ',', 'percentSign': '%', 'list': ';', 'patternDigit': '#', 'plusSign': '+', 'infinity': u'\u221e', 'minusSign': '-'} | [
"[email protected]"
] | |
901b405f4a2a51fd6ca9bfd5094110f8809a137e | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_1_neat/16_0_1_ankso_problem1.py | abdcde8c10348975b61eb615936ae90fb286edb5 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 736 | py |
def returnList(n):
    # Return the decimal digits of n as a list of single-character strings.
    n = str(n)
    digits = list(n)
    return digits
def check(seen):
    # Return True once every digit 0-9 appears in `seen`.
    # (Parameter renamed from `all`, which shadowed the built-in.)
    stat = False
    for i in range(10):
        if str(i) in seen:
            stat = True
        else:
            stat = False
            break
    return stat
testCases = int(raw_input())
for i in range(testCases):
    N = int(raw_input())
    if N == 0:
        # Every multiple of 0 is 0, so all ten digits are never seen.
        print "Case #"+str(i+1)+": INSOMNIA"
    else:
        listOfNum = returnList(N)
        j = 1
        # Start from N itself: newNumber must be defined before the first
        # check(), and if N already contains every digit the answer is N.
        newNumber = N
        while True:
            if check(listOfNum):
                print "Case #"+str(i+1)+": "+str(newNumber)
                break
            j = j+1
            newNumber = N*j
            listOfNum.extend(returnList(newNumber))
            listOfNum = list(set(listOfNum))
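# Worked example (this looks like Code Jam 2016 Qualification A, "Counting
# Sheep", judging by the crawler path): for N = 1692 the multiples 1692,
# 3384, 5076 together cover all ten digits, so the script prints
# "Case #1: 5076"; for N = 0 it prints "Case #1: INSOMNIA".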
| [
"[[email protected]]"
] | |
d4529fd488e177eff6820f5688b7d6fd9790eab3 | c43fbcb4442428e85616f664964d1e27ca396070 | /runs/malte/simple/config.py | 5d9a5f0f13ad31b04abdecbc9011127e24d6fd1c | [] | no_license | megalut/megalut | ddac89a0dca70e13979d31b80d52233226233ade | 63bd4bec8000ad13f4963d464d7b7b4d470a36ab | refs/heads/master | 2020-04-15T00:33:42.815988 | 2018-09-11T08:45:48 | 2018-09-11T08:45:48 | 20,882,727 | 2 | 1 | null | 2018-09-11T08:45:49 | 2014-06-16T11:39:14 | Python | UTF-8 | Python | false | false | 317 | py |
import megalut
import megalut.learn
import os
import numpy as np
import logging
#logging.basicConfig(level=logging.INFO)
logging.basicConfig(format='PID %(process)06d | %(asctime)s | %(levelname)s: %(name)s(%(funcName)s): %(message)s',level=logging.INFO)
workdir = "/vol/fohlen11/fohlen11_1/mtewes/simplewd/"
| [
"[email protected]"
] | |
918d1f2c0d7a9e30280136bb90e114355d60de4c | 63bacb52d016cf7a237dacd79ba2861842c49ca9 | /test/test_put_write_off_invoice_response_credit_memo.py | e43a4fa1e412ddeef0e6976ce236a3740fe64cd4 | [] | no_license | arundharumar-optimizely/zuora-client-python | ee9667956b32b64b456920ad6246e02528fe6645 | a529a01364e41844c91f39df300c85c8d332912a | refs/heads/master | 2020-07-05T23:09:20.081816 | 2019-07-30T21:46:47 | 2019-07-30T21:46:47 | 202,811,594 | 0 | 0 | null | 2019-08-16T23:26:52 | 2019-08-16T23:26:52 | null | UTF-8 | Python | false | false | 40,430 | py | # coding: utf-8
"""
Zuora API Reference
# Introduction Welcome to the reference for the Zuora REST API! <a href=\"http://en.wikipedia.org/wiki/REST_API\" target=\"_blank\">REST</a> is a web-service protocol that lends itself to rapid development by using everyday HTTP and JSON technology. The Zuora REST API provides a broad set of operations and resources that: * Enable Web Storefront integration from your website. * Support self-service subscriber sign-ups and account management. * Process revenue schedules through custom revenue rule models. * Enable manipulation of most objects in the Zuora Object Model. Want to share your opinion on how our API works for you? <a href=\"https://community.zuora.com/t5/Developers/API-Feedback-Form/gpm-p/21399\" target=\"_blank\">Tell us how you feel </a>about using our API and what we can do to make it better. ## Access to the API If you have a Zuora tenant, you can access the Zuora REST API via one of the following endpoints: | Tenant | Base URL for REST Endpoints | |-------------------------|-------------------------| |US Production | https://rest.zuora.com | |US API Sandbox | https://rest.apisandbox.zuora.com| |US Performance Test | https://rest.pt1.zuora.com | |EU Production | https://rest.eu.zuora.com | |EU Sandbox | https://rest.sandbox.eu.zuora.com | The Production endpoint provides access to your live user data. API Sandbox tenants are a good place to test code without affecting real-world data. If you would like Zuora to provision an API Sandbox tenant for you, contact your Zuora representative for assistance. **Note:** If you have a tenant in the Production Copy Environment, submit a request at <a href=\"http://support.zuora.com/\" target=\"_blank\">Zuora Global Support</a> to enable the Zuora REST API in your tenant and obtain the base URL for REST endpoints. If you do not have a Zuora tenant, go to <a href=\"https://www.zuora.com/resource/zuora-test-drive\" target=\"_blank\">https://www.zuora.com/resource/zuora-test-drive</a> and sign up for a Production Test Drive tenant. The tenant comes with seed data, including a sample product catalog. # API Changelog You can find the <a href=\"https://community.zuora.com/t5/Developers/API-Changelog/gpm-p/18092\" target=\"_blank\">Changelog</a> of the API Reference in the Zuora Community. # Authentication ## OAuth v2.0 Zuora recommends that you use OAuth v2.0 to authenticate to the Zuora REST API. Currently, OAuth is not available in every environment. See [Zuora Testing Environments](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/D_Zuora_Environments) for more information. Zuora recommends you to create a dedicated API user with API write access on a tenant when authenticating via OAuth, and then create an OAuth client for this user. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for how to do this. By creating a dedicated API user, you can control permissions of the API user without affecting other non-API users. If a user is deactivated, all of the user's OAuth clients will be automatically deactivated. Authenticating via OAuth requires the following steps: 1. Create a Client 2. Generate a Token 3. Make Authenticated Requests ### Create a Client You must first [create an OAuth client](https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users#Create_an_OAuth_Client_for_a_User) in the Zuora UI. To do this, you must be an administrator of your Zuora tenant. 
This is a one-time operation. You will be provided with a Client ID and a Client Secret. Please note this information down, as it will be required for the next step. **Note:** The OAuth client will be owned by a Zuora user account. If you want to perform PUT, POST, or DELETE operations using the OAuth client, the owner of the OAuth client must have a Platform role that includes the \"API Write Access\" permission. ### Generate a Token After creating a client, you must make a call to obtain a bearer token using the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) operation. This operation requires the following parameters: - `client_id` - the Client ID displayed when you created the OAuth client in the previous step - `client_secret` - the Client Secret displayed when you created the OAuth client in the previous step - `grant_type` - must be set to `client_credentials` **Note**: The Client ID and Client Secret mentioned above were displayed when you created the OAuth Client in the prior step. The [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response specifies how long the bearer token is valid for. Call [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) again to generate a new bearer token. ### Make Authenticated Requests To authenticate subsequent API requests, you must provide a valid bearer token in an HTTP header: `Authorization: Bearer {bearer_token}` If you have [Zuora Multi-entity](https://www.zuora.com/developer/api-reference/#tag/Entities) enabled, you need to set an additional header to specify the ID of the entity that you want to access. You can use the `scope` field in the [Generate an OAuth token](https://www.zuora.com/developer/api-reference/#operation/createToken) response to determine whether you need to specify an entity ID. If the `scope` field contains more than one entity ID, you must specify the ID of the entity that you want to access. For example, if the `scope` field contains `entity.1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` and `entity.c92ed977-510c-4c48-9b51-8d5e848671e9`, specify one of the following headers: - `Zuora-Entity-Ids: 1a2b7a37-3e7d-4cb3-b0e2-883de9e766cc` - `Zuora-Entity-Ids: c92ed977-510c-4c48-9b51-8d5e848671e9` **Note**: For a limited period of time, Zuora will accept the `entityId` header as an alternative to the `Zuora-Entity-Ids` header. If you choose to set the `entityId` header, you must remove all \"-\" characters from the entity ID in the `scope` field. If the `scope` field contains a single entity ID, you do not need to specify an entity ID. ## Other Supported Authentication Schemes Zuora continues to support the following additional legacy means of authentication: * Use username and password. Include authentication with each request in the header: * `apiAccessKeyId` * `apiSecretAccessKey` Zuora recommends that you create an API user specifically for making API calls. See <a href=\"https://knowledgecenter.zuora.com/CF_Users_and_Administrators/A_Administrator_Settings/Manage_Users/Create_an_API_User\" target=\"_blank\">Create an API User</a> for more information. * Use an authorization cookie. The cookie authorizes the user to make calls to the REST API for the duration specified in **Administration > Security Policies > Session timeout**. The cookie expiration time is reset with this duration after every call to the REST API. 
To obtain a cookie, call the [Connections](https://www.zuora.com/developer/api-reference/#tag/Connections) resource with the following API user information: * ID * Password * For CORS-enabled APIs only: Include a 'single-use' token in the request header, which re-authenticates the user with each request. See below for more details. ### Entity Id and Entity Name The `entityId` and `entityName` parameters are only used for [Zuora Multi-entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity \"Zuora Multi-entity\"). These are the legacy parameters that Zuora will only continue to support for a period of time. Zuora recommends you to use the `Zuora-Entity-Ids` parameter instead. The `entityId` and `entityName` parameters specify the Id and the [name of the entity](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/B_Introduction_to_Entity_and_Entity_Hierarchy#Name_and_Display_Name \"Introduction to Entity and Entity Hierarchy\") that you want to access, respectively. Note that you must have permission to access the entity. You can specify either the `entityId` or `entityName` parameter in the authentication to access and view an entity. * If both `entityId` and `entityName` are specified in the authentication, an error occurs. * If neither `entityId` nor `entityName` is specified in the authentication, you will log in to the entity in which your user account is created. To get the entity Id and entity name, you can use the GET Entities REST call. For more information, see [API User Authentication](https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Multi-entity/A_Overview_of_Multi-entity#API_User_Authentication \"API User Authentication\"). ### Token Authentication for CORS-Enabled APIs The CORS mechanism enables REST API calls to Zuora to be made directly from your customer's browser, with all credit card and security information transmitted directly to Zuora. This minimizes your PCI compliance burden, allows you to implement advanced validation on your payment forms, and makes your payment forms look just like any other part of your website. For security reasons, instead of using cookies, an API request via CORS uses **tokens** for authentication. The token method of authentication is only designed for use with requests that must originate from your customer's browser; **it should not be considered a replacement to the existing cookie authentication** mechanism. See [Zuora CORS REST](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Zuora_CORS_REST \"Zuora CORS REST\") for details on how CORS works and how you can begin to implement customer calls to the Zuora REST APIs. See [HMAC Signatures](https://www.zuora.com/developer/api-reference/#operation/POSTHMACSignature \"HMAC Signatures\") for details on the HMAC method that returns the authentication token. # Requests and Responses ## Request IDs As a general rule, when asked to supply a \"key\" for an account or subscription (accountKey, account-key, subscriptionKey, subscription-key), you can provide either the actual ID or the number of the entity. ## HTTP Request Body Most of the parameters and data accompanying your requests will be contained in the body of the HTTP request. The Zuora REST API accepts JSON in the HTTP request body. No other data format (e.g., XML) is supported. 
### Data Type ([Actions](https://www.zuora.com/developer/api-reference/#tag/Actions) and CRUD operations only) We recommend that you do not specify the decimal values with quotation marks, commas, and spaces. Use characters of `+-0-9.eE`, for example, `5`, `1.9`, `-8.469`, and `7.7e2`. Also, Zuora does not convert currencies for decimal values. ## Testing a Request Use a third party client, such as [curl](https://curl.haxx.se \"curl\"), [Postman](https://www.getpostman.com \"Postman\"), or [Advanced REST Client](https://advancedrestclient.com \"Advanced REST Client\"), to test the Zuora REST API. You can test the Zuora REST API from the Zuora API Sandbox or Production tenants. If connecting to Production, bear in mind that you are working with your live production data, not sample data or test data. ## Testing with Credit Cards Sooner or later it will probably be necessary to test some transactions that involve credit cards. For suggestions on how to handle this, see [Going Live With Your Payment Gateway](https://knowledgecenter.zuora.com/CB_Billing/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards \"C_Zuora_User_Guides/A_Billing_and_Payments/M_Payment_Gateways/C_Managing_Payment_Gateways/B_Going_Live_Payment_Gateways#Testing_with_Credit_Cards\" ). ## Concurrent Request Limits Zuora enforces tenant-level concurrent request limits. See <a href=\"https://knowledgecenter.zuora.com/BB_Introducing_Z_Business/Policies/Concurrent_Request_Limits\" target=\"_blank\">Concurrent Request Limits</a> for more information. ## Timeout Limit If a request does not complete within 120 seconds, the request times out and Zuora returns a Gateway Timeout error. ## Error Handling Responses and error codes are detailed in [Responses and errors](https://knowledgecenter.zuora.com/DC_Developers/C_REST_API/Responses_and_Errors \"Responses and errors\"). # Pagination When retrieving information (using GET methods), the optional `pageSize` query parameter sets the maximum number of rows to return in a response. The maximum is `40`; larger values are treated as `40`. If this value is empty or invalid, `pageSize` typically defaults to `10`. The default value for the maximum number of rows retrieved can be overridden at the method level. If more rows are available, the response will include a `nextPage` element, which contains a URL for requesting the next page. If this value is not provided, no more rows are available. No \"previous page\" element is explicitly provided; to support backward paging, use the previous call. ## Array Size For data items that are not paginated, the REST API supports arrays of up to 300 rows. Thus, for instance, repeated pagination can retrieve thousands of customer accounts, but within any account an array of no more than 300 rate plans is returned. # API Versions The Zuora REST API are version controlled. Versioning ensures that Zuora REST API changes are backward compatible. Zuora uses a major and minor version nomenclature to manage changes. By specifying a version in a REST request, you can get expected responses regardless of future changes to the API. ## Major Version The major version number of the REST API appears in the REST URL. Currently, Zuora only supports the **v1** major version. For example, `POST https://rest.zuora.com/v1/subscriptions`. ## Minor Version Zuora uses minor versions for the REST API to control small changes. For example, a field in a REST method is deprecated and a new field is used to replace it. 
Some fields in the REST methods are supported as of minor versions. If a field is not noted with a minor version, this field is available for all minor versions. If a field is noted with a minor version, this field is in version control. You must specify the supported minor version in the request header to process without an error. If a field is in version control, it is either with a minimum minor version or a maximum minor version, or both of them. You can only use this field with the minor version between the minimum and the maximum minor versions. For example, the `invoiceCollect` field in the POST Subscription method is in version control and its maximum minor version is 189.0. You can only use this field with the minor version 189.0 or earlier. If you specify a version number in the request header that is not supported, Zuora will use the minimum minor version of the REST API. In our REST API documentation, if a field or feature requires a minor version number, we note that in the field description. You only need to specify the version number when you use the fields require a minor version. To specify the minor version, set the `zuora-version` parameter to the minor version number in the request header for the request call. For example, the `collect` field is in 196.0 minor version. If you want to use this field for the POST Subscription method, set the `zuora-version` parameter to `196.0` in the request header. The `zuora-version` parameter is case sensitive. For all the REST API fields, by default, if the minor version is not specified in the request header, Zuora will use the minimum minor version of the REST API to avoid breaking your integration. ### Minor Version History The supported minor versions are not serial. This section documents the changes made to each Zuora REST API minor version. The following table lists the supported versions and the fields that have a Zuora REST API minor version. | Fields | Minor Version | REST Methods | Description | |:--------|:--------|:--------|:--------| | invoiceCollect | 189.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice and collects a payment for a subscription. 
| | collect | 196.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Collects an automatic payment for a subscription. | | invoice | 196.0 and 207.0| [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice for a subscription. | | invoiceTargetDate | 196.0 and earlier | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | invoiceTargetDate | 207.0 and earlier | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | targetDate | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") |Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. 
| | targetDate | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Date through which charges are calculated on the invoice, as `yyyy-mm-dd`. | | includeExisting DraftInvoiceItems | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | includeExisting DraftDocItems | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | Specifies whether to include draft invoice items in subscription previews. Specify it to be `true` (default) to include draft invoice items in the preview result. Specify it to be `false` to excludes draft invoice items in the preview result. | | previewType | 196.0 and earlier| [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `InvoiceItem`(default), `ChargeMetrics`, and `InvoiceItemChargeMetrics`. | | previewType | 207.0 and later | [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") | The type of preview you will receive. The possible values are `LegalDoc`(default), `ChargeMetrics`, and `LegalDocChargeMetrics`. 
| | runBilling | 211.0 and later | [Create Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_Subscription \"Create Subscription\"); [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\"); [Renew Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_RenewSubscription \"Renew Subscription\"); [Cancel Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_CancelSubscription \"Cancel Subscription\"); [Suspend Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_SuspendSubscription \"Suspend Subscription\"); [Resume Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_ResumeSubscription \"Resume Subscription\"); [Create Account](https://www.zuora.com/developer/api-reference/#operation/POST_Account \"Create Account\")|Generates an invoice or credit memo for a subscription. **Note:** Credit memos are only available if you have the Invoice Settlement feature enabled. | | invoiceDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice being generated, as `yyyy-mm-dd`. | | invoiceTargetDate | 214.0 and earlier | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice is generated, as `yyyy-mm-dd`. | | documentDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date that should appear on the invoice and credit memo being generated, as `yyyy-mm-dd`. | | targetDate | 215.0 and later | [Invoice and Collect](https://www.zuora.com/developer/api-reference/#operation/POST_TransactionInvoicePayment \"Invoice and Collect\") |Date through which to calculate charges on this account if an invoice or a credit memo is generated, as `yyyy-mm-dd`. | | memoItemAmount | 223.0 and earlier | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | amount | 224.0 and later | [Create credit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_CreditMemoFromPrpc \"Create credit memo from charge\"); [Create debit memo from charge](https://www.zuora.com/developer/api-reference/#operation/POST_DebitMemoFromPrpc \"Create debit memo from charge\") | Amount of the memo item. | | subscriptionNumbers | 222.4 and earlier | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers of the subscriptions in an order. | | subscriptions | 223.0 and later | [Create order](https://www.zuora.com/developer/api-reference/#operation/POST_Order \"Create order\") | Container for the subscription numbers and statuses in an order. 
| | creditTaxItems | 238.0 and earlier | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\") | Container for the taxation items of the credit memo item. | | taxItems | 238.0 and earlier | [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the debit memo item. | | taxationItems | 239.0 and later | [Get credit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItems \"Get credit memo items\"); [Get credit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_CreditMemoItem \"Get credit memo item\"); [Get debit memo items](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItems \"Get debit memo items\"); [Get debit memo item](https://www.zuora.com/developer/api-reference/#operation/GET_DebitMemoItem \"Get debit memo item\") | Container for the taxation items of the memo item. | #### Version 207.0 and Later The response structure of the [Preview Subscription](https://www.zuora.com/developer/api-reference/#operation/POST_SubscriptionPreview \"Preview Subscription\") and [Update Subscription](https://www.zuora.com/developer/api-reference/#operation/PUT_Subscription \"Update Subscription\") methods are changed. The following invoice related response fields are moved to the invoice container: * amount * amountWithoutTax * taxAmount * invoiceItems * targetDate * chargeMetrics # Zuora Object Model The following diagram presents a high-level view of the key Zuora objects. Click the image to open it in a new tab to resize it. <a href=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" target=\"_blank\"><img src=\"https://www.zuora.com/wp-content/uploads/2017/01/ZuoraERD.jpeg\" alt=\"Zuora Object Model Diagram\"></a> See the following articles for information about other parts of the Zuora business object model: * <a href=\"https://knowledgecenter.zuora.com/CB_Billing/Invoice_Settlement/D_Invoice_Settlement_Object_Model\" target=\"_blank\">Invoice Settlement Object Model</a> * <a href=\"https://knowledgecenter.zuora.com/BC_Subscription_Management/Orders/BA_Orders_Object_Model\" target=\"_blank\">Orders Object Model</a> You can use the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation to list the fields of each Zuora object that is available in your tenant. When you call the operation, you must specify the API name of the Zuora object. The following table provides the API name of each Zuora object: | Object | API Name | |-----------------------------------------------|--------------------------------------------| | Account | `Account` | | Accounting Code | `AccountingCode` | | Accounting Period | `AccountingPeriod` | | Amendment | `Amendment` | | Application Group | `ApplicationGroup` | | Billing Run | <p>`BillingRun`</p><p>**Note:** The API name of this object is `BillingRun` in the [Describe object](https://www.zuora.com/developer/api-reference/#operation/GET_Describe) operation, Export ZOQL queries, and Data Query. 
Otherwise, the API name of this object is `BillRun`.</p> | | Contact | `Contact` | | Contact Snapshot | `ContactSnapshot` | | Credit Balance Adjustment | `CreditBalanceAdjustment` | | Credit Memo | `CreditMemo` | | Credit Memo Application | `CreditMemoApplication` | | Credit Memo Application Item | `CreditMemoApplicationItem` | | Credit Memo Item | `CreditMemoItem` | | Credit Memo Part | `CreditMemoPart` | | Credit Memo Part Item | `CreditMemoPartItem` | | Credit Taxation Item | `CreditTaxationItem` | | Custom Exchange Rate | `FXCustomRate` | | Debit Memo | `DebitMemo` | | Debit Memo Item | `DebitMemoItem` | | Debit Taxation Item | `DebitTaxationItem` | | Discount Applied Metrics | `DiscountAppliedMetrics` | | Entity | `Tenant` | | Feature | `Feature` | | Gateway Reconciliation Event | `PaymentGatewayReconciliationEventLog` | | Gateway Reconciliation Job | `PaymentReconciliationJob` | | Gateway Reconciliation Log | `PaymentReconciliationLog` | | Invoice | `Invoice` | | Invoice Adjustment | `InvoiceAdjustment` | | Invoice Item | `InvoiceItem` | | Invoice Item Adjustment | `InvoiceItemAdjustment` | | Invoice Payment | `InvoicePayment` | | Journal Entry | `JournalEntry` | | Journal Entry Item | `JournalEntryItem` | | Journal Run | `JournalRun` | | Order | `Order` | | Order Action | `OrderAction` | | Order ELP | `OrderElp` | | Order Item | `OrderItem` | | Order MRR | `OrderMrr` | | Order Quantity | `OrderQuantity` | | Order TCB | `OrderTcb` | | Order TCV | `OrderTcv` | | Payment | `Payment` | | Payment Application | `PaymentApplication` | | Payment Application Item | `PaymentApplicationItem` | | Payment Method | `PaymentMethod` | | Payment Method Snapshot | `PaymentMethodSnapshot` | | Payment Method Transaction Log | `PaymentMethodTransactionLog` | | Payment Method Update | `UpdaterDetail` | | Payment Part | `PaymentPart` | | Payment Part Item | `PaymentPartItem` | | Payment Run | `PaymentRun` | | Payment Transaction Log | `PaymentTransactionLog` | | Processed Usage | `ProcessedUsage` | | Product | `Product` | | Product Feature | `ProductFeature` | | Product Rate Plan | `ProductRatePlan` | | Product Rate Plan Charge | `ProductRatePlanCharge` | | Product Rate Plan Charge Tier | `ProductRatePlanChargeTier` | | Rate Plan | `RatePlan` | | Rate Plan Charge | `RatePlanCharge` | | Rate Plan Charge Tier | `RatePlanChargeTier` | | Refund | `Refund` | | Refund Application | `RefundApplication` | | Refund Application Item | `RefundApplicationItem` | | Refund Invoice Payment | `RefundInvoicePayment` | | Refund Part | `RefundPart` | | Refund Part Item | `RefundPartItem` | | Refund Transaction Log | `RefundTransactionLog` | | Revenue Charge Summary | `RevenueChargeSummary` | | Revenue Charge Summary Item | `RevenueChargeSummaryItem` | | Revenue Event | `RevenueEvent` | | Revenue Event Credit Memo Item | `RevenueEventCreditMemoItem` | | Revenue Event Debit Memo Item | `RevenueEventDebitMemoItem` | | Revenue Event Invoice Item | `RevenueEventInvoiceItem` | | Revenue Event Invoice Item Adjustment | `RevenueEventInvoiceItemAdjustment` | | Revenue Event Item | `RevenueEventItem` | | Revenue Event Item Credit Memo Item | `RevenueEventItemCreditMemoItem` | | Revenue Event Item Debit Memo Item | `RevenueEventItemDebitMemoItem` | | Revenue Event Item Invoice Item | `RevenueEventItemInvoiceItem` | | Revenue Event Item Invoice Item Adjustment | `RevenueEventItemInvoiceItemAdjustment` | | Revenue Event Type | `RevenueEventType` | | Revenue Schedule | `RevenueSchedule` | | Revenue Schedule Credit Memo Item | 
`RevenueScheduleCreditMemoItem` | | Revenue Schedule Debit Memo Item | `RevenueScheduleDebitMemoItem` | | Revenue Schedule Invoice Item | `RevenueScheduleInvoiceItem` | | Revenue Schedule Invoice Item Adjustment | `RevenueScheduleInvoiceItemAdjustment` | | Revenue Schedule Item | `RevenueScheduleItem` | | Revenue Schedule Item Credit Memo Item | `RevenueScheduleItemCreditMemoItem` | | Revenue Schedule Item Debit Memo Item | `RevenueScheduleItemDebitMemoItem` | | Revenue Schedule Item Invoice Item | `RevenueScheduleItemInvoiceItem` | | Revenue Schedule Item Invoice Item Adjustment | `RevenueScheduleItemInvoiceItemAdjustment` | | Subscription | `Subscription` | | Subscription Product Feature | `SubscriptionProductFeature` | | Taxable Item Snapshot | `TaxableItemSnapshot` | | Taxation Item | `TaxationItem` | | Updater Batch | `UpdaterBatch` | | Usage | `Usage` | # noqa: E501
OpenAPI spec version: 2019-07-26
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import zuora_client
from zuora_client.models.put_write_off_invoice_response_credit_memo import PUTWriteOffInvoiceResponseCreditMemo # noqa: E501
from zuora_client.rest import ApiException
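# The module docstring above walks through Zuora's OAuth v2.0 flow (create a
# client, generate a token, make authenticated requests). The helper below is
# a minimal sketch of the "Generate a Token" step only, assuming the
# `requests` package is available; the /oauth/token endpoint and form fields
# follow the docstring, while the base URL and credentials are placeholders.
def _generate_oauth_token_sketch(base_url, client_id, client_secret):
    """Return a bearer token via the client_credentials grant (sketch)."""
    import requests
    response = requests.post(
        base_url + '/oauth/token',
        data={
            'client_id': client_id,
            'client_secret': client_secret,
            'grant_type': 'client_credentials',
        })
    response.raise_for_status()
    # Subsequent calls send the header: Authorization: Bearer <access_token>
    return response.json()['access_token']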
class TestPUTWriteOffInvoiceResponseCreditMemo(unittest.TestCase):
"""PUTWriteOffInvoiceResponseCreditMemo unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testPUTWriteOffInvoiceResponseCreditMemo(self):
"""Test PUTWriteOffInvoiceResponseCreditMemo"""
# FIXME: construct object with mandatory attributes with example values
# model = zuora_client.models.put_write_off_invoice_response_credit_memo.PUTWriteOffInvoiceResponseCreditMemo() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
aca3d953cd1e8d9ebdc2f0213306bab4491b589f | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/lib/surface/compute/addresses/create.py | 009544b0796daa56bbdd5968a5eb18a72122a125 | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 14,666 | py | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for reserving IP addresses."""
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import constants
from googlecloudsdk.api_lib.compute import name_generator
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import flags as compute_flags
from googlecloudsdk.command_lib.compute.addresses import flags
def _Args(cls, parser):
"""Argument parsing."""
cls.ADDRESSES_ARG = flags.AddressArgument(required=False)
cls.ADDRESSES_ARG.AddArgument(parser, operation_type='create')
flags.AddDescription(parser)
parser.display_info.AddCacheUpdater(flags.AddressesCompleter)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Create(base.CreateCommand):
"""Reserve IP addresses.
*{command}* is used to reserve one or more IP addresses. Once
an IP address is reserved, it will be associated with the
project until it is released using 'gcloud compute addresses
delete'. Ephemeral IP addresses that are in use by resources
in the project can be reserved using the `--addresses`
flag.
## EXAMPLES
To reserve three IP addresses in the `us-central1` region,
run:
$ {command} ADDRESS-1 ADDRESS-2 ADDRESS-3 --region us-central1
To reserve ephemeral IP addresses 162.222.181.198 and
23.251.146.189 which are being used by virtual machine
instances in the `us-central1` region, run:
$ {command} --addresses 162.222.181.198,23.251.146.189 --region us-central1
In the above invocation, the two addresses will be assigned
random names.
To reserve an IP address from the subnet ``default'' in the ``us-central1''
region, run:
$ {command} SUBNET-ADDRESS-1 --region us-central1 --subnet default
"""
SUBNETWORK_ARG = None
ADDRESSES_ARG = None
@classmethod
def Args(cls, parser):
_Args(cls, parser)
flags.AddAddressesAndIPVersions(parser, required=False)
cls.SUBNETWORK_ARG = flags.SubnetworkArgument()
cls.SUBNETWORK_ARG.AddArgument(parser)
def GetAddress(self, messages, args, address, address_ref, resource_parser):
if args.ip_version or (
address is None and
address_ref.Collection() == 'compute.globalAddresses'):
ip_version = messages.Address.IpVersionValueValuesEnum(
args.ip_version or 'IPV4')
else:
# IP version is only specified in global requests if an address is not
# specified to determine whether an ipv4 or ipv6 address should be
# allocated.
ip_version = None
# TODO(b/36862747): get rid of args.subnet check
if args.subnet:
if address_ref.Collection() == 'compute.globalAddresses':
raise exceptions.ToolException(
'[--subnet] may not be specified for global addresses.')
if not args.subnet_region:
args.subnet_region = address_ref.region
subnetwork_url = flags.SubnetworkArgument().ResolveAsResource(
args, resource_parser).SelfLink()
else:
subnetwork_url = None
return messages.Address(
address=address,
description=args.description,
ipVersion=ip_version,
name=address_ref.Name(),
addressType=(messages.Address.AddressTypeValueValuesEnum.INTERNAL
if subnetwork_url else None),
subnetwork=subnetwork_url)
def Run(self, args):
"""Issues requests necessary to create Addresses."""
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
names, addresses = self._GetNamesAndAddresses(args)
if not args.name:
args.name = names
address_refs = self.ADDRESSES_ARG.ResolveAsResource(
args, holder.resources,
scope_lister=compute_flags.GetDefaultScopeLister(client))
requests = []
for address, address_ref in zip(addresses, address_refs):
address_msg = self.GetAddress(client.messages, args, address, address_ref,
holder.resources)
if address_ref.Collection() == 'compute.globalAddresses':
requests.append((client.apitools_client.globalAddresses, 'Insert',
client.messages.ComputeGlobalAddressesInsertRequest(
address=address_msg, project=address_ref.project)))
elif address_ref.Collection() == 'compute.addresses':
requests.append((client.apitools_client.addresses, 'Insert',
client.messages.ComputeAddressesInsertRequest(
address=address_msg,
region=address_ref.region,
project=address_ref.project)))
return client.MakeRequests(requests)
def _GetNamesAndAddresses(self, args):
"""Returns names and addresses provided in args."""
if not args.addresses and not args.name:
raise exceptions.ToolException(
'At least one name or address must be provided.')
if args.name:
names = args.name
else:
      # If we don't have any names then we must have some addresses.
names = [name_generator.GenerateRandomName() for _ in args.addresses]
if args.addresses:
addresses = args.addresses
else:
      # If we don't have any addresses then we must have some names.
addresses = [None] * len(args.name)
if len(addresses) != len(names):
raise exceptions.ToolException(
'If providing both, you must specify the same number of names as '
'addresses.')
return names, addresses
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateBeta(Create):
"""Reserve IP addresses.
*{command}* is used to reserve one or more IP addresses. Once
an IP address is reserved, it will be associated with the
project until it is released using 'gcloud compute addresses
delete'. Ephemeral IP addresses that are in use by resources
  in the project can be reserved using the ``--addresses''
flag.
## EXAMPLES
To reserve three IP addresses in the ``us-central1'' region,
run:
$ {command} ADDRESS-1 ADDRESS-2 ADDRESS-3 --region us-central1
To reserve ephemeral IP addresses 162.222.181.198 and
23.251.146.189 which are being used by virtual machine
instances in the ``us-central1'' region, run:
$ {command} --addresses 162.222.181.198,23.251.146.189 --region us-central1
In the above invocation, the two addresses will be assigned
random names.
To reserve an IP address from the subnet ``default'' in the ``us-central1''
region, run:
$ {command} SUBNET-ADDRESS-1 --region us-central1 --subnet default
"""
SUBNETWORK_ARG = None
ADDRESSES_ARG = None
@classmethod
def Args(cls, parser):
_Args(cls, parser)
flags.AddAddressesAndIPVersions(parser, required=False)
flags.AddNetworkTier(parser)
cls.SUBNETWORK_ARG = flags.SubnetworkArgument()
cls.SUBNETWORK_ARG.AddArgument(parser)
def ConstructNetworkTier(self, messages, args):
if args.network_tier:
network_tier = args.network_tier.upper()
if network_tier in constants.NETWORK_TIER_CHOICES_FOR_INSTANCE:
return messages.Address.NetworkTierValueValuesEnum(args.network_tier)
else:
raise exceptions.InvalidArgumentException(
'--network-tier',
'Invalid network tier [{tier}]'.format(tier=network_tier))
else:
return None
def GetAddress(self, messages, args, address, address_ref, resource_parser):
"""Override."""
network_tier = self.ConstructNetworkTier(messages, args)
if args.ip_version or (
address is None and
address_ref.Collection() == 'compute.globalAddresses'):
ip_version = messages.Address.IpVersionValueValuesEnum(args.ip_version or
'IPV4')
else:
# IP version is only specified in global requests if an address is not
# specified to determine whether an ipv4 or ipv6 address should be
# allocated.
ip_version = None
# TODO(b/36862747): get rid of args.subnet check
if args.subnet:
if address_ref.Collection() == 'compute.globalAddresses':
raise exceptions.ToolException(
'[--subnet] may not be specified for global addresses.')
if not args.subnet_region:
args.subnet_region = address_ref.region
subnetwork_url = flags.SubnetworkArgument().ResolveAsResource(
args, resource_parser).SelfLink()
else:
subnetwork_url = None
return messages.Address(
address=address,
description=args.description,
networkTier=network_tier,
ipVersion=ip_version,
name=address_ref.Name(),
addressType=(messages.Address.AddressTypeValueValuesEnum.INTERNAL
if subnetwork_url else None),
subnetwork=subnetwork_url)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateAlpha(CreateBeta):
"""Reserve IP addresses.
*{command}* is used to reserve one or more IP addresses. Once
an IP address is reserved, it will be associated with the
project until it is released using 'gcloud compute addresses
delete'. Ephemeral IP addresses that are in use by resources
  in the project can be reserved using the ``--addresses''
flag.
## EXAMPLES
To reserve three IP addresses in the ``us-central1'' region,
run:
$ {command} ADDRESS-1 ADDRESS-2 ADDRESS-3 --region us-central1
To reserve ephemeral IP addresses 162.222.181.198 and
23.251.146.189 which are being used by virtual machine
instances in the ``us-central1'' region, run:
$ {command} --addresses 162.222.181.198,23.251.146.189 --region us-central1
In the above invocation, the two addresses will be assigned
random names.
To reserve an IP address from the subnet ``default'' in the ``us-central1''
region, run:
$ {command} SUBNET-ADDRESS-1 --region us-central1 --subnet default
To reserve an IP range 10.110.0.0/16 from the network ``default'' for
VPC_PEERING, run:
$ {command} IP-RANGE-1 --global --addresses 10.110.0.0 --prefix-length 16
--purpose VPC_PEERING --network default
To reserve any IP range with prefix length 16 from the network ``default'' for
VPC_PEERING, run:
$ {command} IP-RANGE-1 --global --prefix-length 16 --purpose VPC_PEERING
--network default
"""
SUBNETWORK_ARG = None
NETWORK_ARG = None
@classmethod
def Args(cls, parser):
_Args(cls, parser)
flags.AddAddressesAndIPVersions(parser, required=False)
flags.AddNetworkTier(parser)
flags.AddPrefixLength(parser)
flags.AddPurpose(parser)
cls.SUBNETWORK_ARG = flags.SubnetworkArgument()
cls.SUBNETWORK_ARG.AddArgument(parser)
cls.NETWORK_ARG = flags.NetworkArgument()
cls.NETWORK_ARG.AddArgument(parser)
def GetAddress(self, messages, args, address, address_ref, resource_parser):
"""Override."""
network_tier = self.ConstructNetworkTier(messages, args)
if args.ip_version or (
address is None and
address_ref.Collection() == 'compute.globalAddresses'):
ip_version = messages.Address.IpVersionValueValuesEnum(
args.ip_version or 'IPV4')
else:
# IP version is only specified in global requests if an address is not
# specified to determine whether an ipv4 or ipv6 address should be
# allocated.
ip_version = None
if args.subnet and args.network:
raise exceptions.ConflictingArgumentsException('--network', '--subnet')
purpose = None
if args.purpose and not args.network and not args.subnet:
raise exceptions.MinimumArgumentException(['--network', '--subnet'],
' if --purpose is specified')
# TODO(b/36862747): get rid of args.subnet check
if args.subnet:
if address_ref.Collection() == 'compute.globalAddresses':
raise exceptions.ToolException(
'[--subnet] may not be specified for global addresses.')
if not args.subnet_region:
args.subnet_region = address_ref.region
subnetwork_url = flags.SubnetworkArgument().ResolveAsResource(
args, resource_parser).SelfLink()
purpose = messages.Address.PurposeValueValuesEnum(args.purpose or
'GCE_ENDPOINT')
if purpose != messages.Address.PurposeValueValuesEnum.GCE_ENDPOINT:
raise exceptions.InvalidArgumentException(
'--purpose',
'must be GCE_ENDPOINT for regional internal addresses.')
else:
subnetwork_url = None
network_url = None
if args.network:
if address_ref.Collection() == 'compute.addresses':
raise exceptions.InvalidArgumentException(
'--network', 'network may not be specified for regional addresses.')
network_url = flags.NetworkArgument().ResolveAsResource(
args, resource_parser).SelfLink()
purpose = messages.Address.PurposeValueValuesEnum(args.purpose or
'VPC_PEERING')
if purpose != messages.Address.PurposeValueValuesEnum.VPC_PEERING:
raise exceptions.InvalidArgumentException(
'--purpose', 'must be VPC_PEERING for global internal addresses.')
if not args.prefix_length:
raise exceptions.RequiredArgumentException(
'--prefix-length',
'prefix length is needed for reserving IP ranges.')
if args.prefix_length:
if purpose != messages.Address.PurposeValueValuesEnum.VPC_PEERING:
raise exceptions.InvalidArgumentException(
'--prefix-length', 'can only be used with [--purpose VPC_PEERING].')
return messages.Address(
address=address,
prefixLength=args.prefix_length,
description=args.description,
networkTier=network_tier,
ipVersion=ip_version,
name=address_ref.Name(),
addressType=(messages.Address.AddressTypeValueValuesEnum.INTERNAL
if subnetwork_url or network_url else None),
purpose=purpose,
subnetwork=subnetwork_url,
network=network_url)
| [
"[email protected]"
] | |
f677ca474fb5707bca7e6923f812c0f9b03202fe | aa6e1dd07a71a73bc08574b76f9e57a3ce8c8286 | /077.Test_BeeWare_windows/beeware-tutorial/beeware-venv/Lib/site-packages/pip/_internal/network/cache.py | a0d55b5e992a5f85890fc06703f33dc53995a17b | [
"MIT"
] | permissive | IvanaXu/PyTools | 0aff5982f50bb300bfa950405192c78473b69537 | 358ae06eef418fde35f424909d4f13049ca9ec7b | refs/heads/master | 2023-06-07T21:45:44.242363 | 2023-06-06T16:00:25 | 2023-06-06T16:00:25 | 163,940,845 | 60 | 8 | MIT | 2022-12-23T02:49:05 | 2019-01-03T07:54:16 | Python | UTF-8 | Python | false | false | 2,329 | py | """HTTP cache implementation.
"""
import os
from contextlib import contextmanager
from pip._vendor.cachecontrol.cache import BaseCache
from pip._vendor.cachecontrol.caches import FileCache
from pip._vendor.requests.models import Response
from pip._internal.utils.filesystem import adjacent_tmp_file, replace
from pip._internal.utils.misc import ensure_dir
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Optional, Iterator
def is_from_cache(response):
# type: (Response) -> bool
return getattr(response, "from_cache", False)
@contextmanager
def suppressed_cache_errors():
# type: () -> Iterator[None]
"""If we can't access the cache then we can just skip caching and process
requests as if caching wasn't enabled.
"""
try:
yield
except (OSError, IOError):
pass
class SafeFileCache(BaseCache):
"""
A file based cache which is safe to use even when the target directory may
not be accessible or writable.
"""
def __init__(self, directory):
# type: (str) -> None
assert directory is not None, "Cache directory must not be None."
super(SafeFileCache, self).__init__()
self.directory = directory
def _get_cache_path(self, name):
# type: (str) -> str
# From cachecontrol.caches.file_cache.FileCache._fn, brought into our
# class for backwards-compatibility and to avoid using a non-public
# method.
hashed = FileCache.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
def get(self, key):
# type: (str) -> Optional[bytes]
path = self._get_cache_path(key)
with suppressed_cache_errors():
with open(path, 'rb') as f:
return f.read()
def set(self, key, value):
# type: (str, bytes) -> None
path = self._get_cache_path(key)
with suppressed_cache_errors():
ensure_dir(os.path.dirname(path))
with adjacent_tmp_file(path) as f:
f.write(value)
replace(f.name, path)
def delete(self, key):
# type: (str) -> None
path = self._get_cache_path(key)
with suppressed_cache_errors():
os.remove(path)
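# Editor's sketch (not part of pip): a minimal SafeFileCache round trip under
# a __main__ guard so importing this module stays side-effect free. The cache
# directory is a throwaway temp dir created just for the demo.
if __name__ == '__main__':
    import tempfile
    _demo = SafeFileCache(tempfile.mkdtemp())
    _demo.set('https://example.org/simple/', b'cached-bytes')
    assert _demo.get('https://example.org/simple/') == b'cached-bytes'
    _demo.delete('https://example.org/simple/')
    assert _demo.get('https://example.org/simple/') is None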
| [
"[email protected]"
] | |
b04538155bd3cd73f2f1271087a0b63e9be949e1 | b6303baeaa840671f1ea747d47c905779a07ffce | /edital/migrations/0015_auto_20210928_1833.py | 9a7475e433d719627343e80ac38fcc3a631bb3c5 | [] | no_license | amarantejoacil/sisnae | 89954ef9e837799750dc56274ec1207e6d39daef | 90e237a41e698cda357b8f555fbb0649f16a78b3 | refs/heads/main | 2023-08-24T23:06:47.628428 | 2021-10-27T16:26:12 | 2021-10-27T16:26:12 | 401,503,074 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 739 | py | # Generated by Django 3.2.6 on 2021-09-28 22:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('edital', '0014_auto_20210907_2050'),
]
operations = [
migrations.AddField(
model_name='edital',
name='edital_quantidade_vaga',
field=models.IntegerField(default=100, verbose_name='Quantidade de vaga'),
preserve_default=False,
),
migrations.AddField(
model_name='edital',
name='edital_valor_auxilio',
field=models.DecimalField(decimal_places=2, default=100, max_digits=8, verbose_name='Valor do auxílio'),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
feead4ae8987ec4ae2e3b66f634259e951d22ad3 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/synthetic/tree-big-4214.py | 2e4e8115bab2dbd80fa145b28bc6f637fbeefaec | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,299 | py | # Binary-search trees
class TreeNode(object):
value:int = 0
left:"TreeNode" = None
right:"TreeNode" = None
def insert(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode(x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode(x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode2(object):
value:int = 0
value2:int = 0
left:"TreeNode2" = None
left2:"TreeNode2" = None
right:"TreeNode2" = None
right2:"TreeNode2" = None
def insert(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode2(x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode2(x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode2", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode2", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode3(object):
value:int = 0
value2:int = 0
value3:int = 0
left:"TreeNode3" = None
left2:"TreeNode3" = None
left3:"TreeNode3" = None
right:"TreeNode3" = None
right2:"TreeNode3" = None
right3:"TreeNode3" = None
def insert(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode3(x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode3(x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode3", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode3", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode3", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode4(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
left:"TreeNode4" = None
left2:"TreeNode4" = None
left3:"TreeNode4" = None
left4:"TreeNode4" = None
right:"TreeNode4" = None
right2:"TreeNode4" = None
right3:"TreeNode4" = None
right4:"TreeNode4" = None
def insert(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode4(x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode4(x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def contains(self:"TreeNode4", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode4", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode4", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode4", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class TreeNode5(object):
value:int = 0
value2:int = 0
value3:int = 0
value4:int = 0
value5:int = 0
left:"TreeNode5" = None
left2:"TreeNode5" = None
left3:"TreeNode5" = None
left4:"TreeNode5" = None
left5:"TreeNode5" = None
right:"TreeNode5" = None
right2:"TreeNode5" = None
right3:"TreeNode5" = None
right4:"TreeNode5" = None
right5:"TreeNode5" = None
def insert(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
return self.right.insert(x)
return False
def insert5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
self.left = makeNode5(x, x, x, x, x)
return True
else:
return self.left.insert(x)
elif x > self.value:
if self.right is None:
self.right = makeNode5(x, x, x, x, x)
return True
else:
                return self.right.insert(x)
return False
def contains(self:"TreeNode5", x:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains2(self:"TreeNode5", x:int, x2:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains3(self:"TreeNode5", x:int, x2:int, x3:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains4(self:"TreeNode5", x:int, x2:int, x3:int, x4:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
def contains5(self:"TreeNode5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if x < self.value:
if self.left is None:
return False
else:
return self.left.contains(x)
elif x > self.value:
if self.right is None:
return False
else:
return self.right.contains(x)
else:
return True
class Tree(object):
root:TreeNode = None
size:int = 0
def insert(self:"Tree", x:int) -> object:
if self.root is None:
self.root = makeNode(x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree2(object):
root:TreeNode2 = None
root2:TreeNode2 = None
size:int = 0
size2:int = 0
def insert(self:"Tree2", x:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree2", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode2(x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree2", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree2", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree3(object):
root:TreeNode3 = None
root2:TreeNode3 = None
root3:TreeNode3 = None
size:int = 0
size2:int = 0
size3:int = 0
def insert(self:"Tree3", x:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree3", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree3", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode3(x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree3", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree3", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree3", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree4(object):
root:TreeNode4 = None
root2:TreeNode4 = None
root3:TreeNode4 = None
root4:TreeNode4 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
def insert(self:"Tree4", x:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree4", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree4", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode4(x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree4", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree4", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree4", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree4", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
class Tree5(object):
root:TreeNode5 = None
root2:TreeNode5 = None
root3:TreeNode5 = None
root4:TreeNode5 = None
root5:TreeNode5 = None
size:int = 0
size2:int = 0
size3:int = 0
size4:int = 0
size5:int = 0
def insert(self:"Tree5", x:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert2(self:"Tree5", x:int, x2:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert3(self:"Tree5", x:int, x2:int, x3:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def insert5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> object:
if self.root is None:
self.root = makeNode5(x, x, x, x, x)
self.size = 1
else:
if self.root.insert(x):
self.size = self.size + 1
def contains(self:"Tree5", x:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains2(self:"Tree5", x:int, x2:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains3(self:"Tree5", x:int, x2:int, x3:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains4(self:"Tree5", x:int, x2:int, x3:int, x4:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def contains5(self:"Tree5", x:int, x2:int, x3:int, x4:int, x5:int) -> bool:
if self.root is None:
return False
else:
return self.root.contains(x)
def makeNode(x: int) -> TreeNode:
b:TreeNode = None
b = TreeNode()
b.value = x
return b
def makeNode2(x: int, x2: int) -> TreeNode2:
b:TreeNode2 = None
b2:TreeNode2 = None
b = TreeNode2()
b.value = x
return b
def makeNode3(x: int, x2: int, x3: int) -> TreeNode3:
b:TreeNode3 = None
b2:TreeNode3 = None
b3:TreeNode3 = None
b = TreeNode3()
b.value = x
return b
def makeNode4(x: int, x2: int, x3: int, x4: int) -> TreeNode4:
b:TreeNode4 = None
b2:TreeNode4 = None
b3:TreeNode4 = None
b4:TreeNode4 = None
b = TreeNode4()
b.value = x
return b
def makeNode5(x: int, x2: int, x3: int, x4: int, x5: int) -> TreeNode5:
b:TreeNode5 = None
b2:TreeNode5 = None
b3:TreeNode5 = None
b4:TreeNode5 = None
b5:TreeNode5 = None
b = TreeNode5()
b.value = x
return b
# Input parameters
n:int = 100
n2:int = 100
n3:int = 100
n4:int = 100
n5:int = 100
c:int = 4
c2:int = 4
c3:int = 4
c4:int = 4
c5:int = 4
# Data
t:Tree = None
t2:Tree = None
t3:Tree = None
t4:Tree = None
t5:Tree = None
i:int = 0
i2:int = 0
i3:int = 0
i4:int = 0
i5:int = 0
k:int = 37813
k2:int = 37813
k3:int = 37813
k4:int = 37813
k5:int = 37813
# Crunch
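# Editor's note: k follows a fixed multiplicative congruential sequence
# (k <- k * 37813 mod 37831), so the loop below fills the tree with
# deterministic pseudo-random keys before probing a handful of values.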
t = Tree()
while i < n:
t.insert(k)
k = (k * 37813) % 37831
if i % c != 0:
t.insert(i)
i = i + 1
print(t.size)
for i in [4, 8, 15, 16, 23, 42]:
if t.contains(i):
print(i)
| [
"[email protected]"
] | |
768fff722cf0d2f12f0a7428a500a54db6db3a92 | 02952fc67147a2f11a9ed8c4eb29210bec5672ed | /business/service/urls/polardb.py | 239025e7fe9457d9d1dfc284c34ba4bc1af18f10 | [] | no_license | cuijianzhe/cow | b110a70398b09a401dadc7d3ed24dfe2bae50f5b | 3539cab6e73571f84b7f17391d9a363a756f12e1 | refs/heads/main | 2023-06-04T10:33:33.975885 | 2021-06-19T10:40:36 | 2021-06-19T10:40:36 | 340,634,448 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | from django.urls import path
from business.service.apis import polardb as polardb_api
urlpatterns = [
path('service/polardb/create/', polardb_api.CreateServicePolarDBApi.as_view()),
path('service/polardb/delete/', polardb_api.DeleteServicePolarDBApi.as_view()),
path('service/polardb/list/', polardb_api.ListServicePolarDBApi.as_view()),
]
| [
"[email protected]"
] | |
bffd945b3e55d605e9bdc96d37c366719e574dc5 | 32cb0be487895629ad1184ea25e0076a43abba0a | /LifePictorial/top/api/rest/CrmGroupsGetRequest.py | fdf3e997d2a6a1433995e490178adfa406e9a607 | [] | no_license | poorevil/LifePictorial | 6814e447ec93ee6c4d5b0f1737335601899a6a56 | b3cac4aa7bb5166608f4c56e5564b33249f5abef | refs/heads/master | 2021-01-25T08:48:21.918663 | 2014-03-19T08:55:47 | 2014-03-19T08:55:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | '''
Created by auto_sdk on 2014-02-10 16:59:30
'''
from top.api.base import RestApi
class CrmGroupsGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.current_page = None
self.page_size = None
def getapiname(self):
return 'taobao.crm.groups.get'
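# Editor's sketch (commented out; it assumes the classic top-sdk calling
# convention with top.appinfo()/set_app_info()/getResponse(), which may vary
# by SDK version -- the credentials below are placeholders):
# import top
# req = CrmGroupsGetRequest()
# req.set_app_info(top.appinfo('your-appkey', 'your-appsecret'))
# req.current_page = 1
# req.page_size = 20
# resp = req.getResponse()  # parsed taobao.crm.groups.get result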
| [
"[email protected]"
] | |
d3987022176ead2e9f190e5c0da47c1505c6fba0 | dfdecc0f91c6fa0319325561ed0a20f8544f0312 | /test.py | 4b61a3c63775e29a496c1734d0afc1a30b4e6eeb | [] | no_license | ShichaoMa/MultiThreadClosing | c3807047938329a8655d65dc011173c16375240c | 43b556d9ee6a6ae11f1481675b822b2660a7c36b | refs/heads/master | 2021-01-20T19:27:04.450710 | 2017-12-09T08:58:15 | 2017-12-09T08:58:15 | 64,533,341 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 697 | py | import time
from threading import Thread
from multi_thread_closing import MultiThreadClosing
class Test(MultiThreadClosing):
name = "test_thread"
def start(self):
t1 = Thread(target=self.process)
t2 = Thread(target=self.process)
self.threads.append(t1)
self.threads.append(t2)
t1.start()
t2.start()
        while any(t.is_alive() for t in self.threads):
            print("main %s.." % self.alive)
            time.sleep(1)
    def process(self):
        while self.alive:
            for i in range(20):
                print(i)
time.sleep(3)
if __name__ == "__main__":
t = Test()
t.set_logger()
t.start() | [
"[email protected]"
] | |
a4f087cbb7c9c43b0ecc4c3defb3fd07e34068fa | 5bf245e55b756ca3e664d857f36db092855c7a98 | /externals/mne/beamformer/_lcmv.py | 75a9b0204b6b6c2681bca51cb1e578b8a713ac9b | [
"BSD-3-Clause"
] | permissive | kingjr/decoding_challenge_cortana_2016_3rd | b264fabbe8fb2f3788d11dc2c4deebcf217a64a5 | 26c2ebf5200b5a5cd268fa73ac3928d7257d08d3 | refs/heads/master | 2021-01-20T17:54:12.617430 | 2016-07-13T22:31:58 | 2016-07-13T22:31:58 | 63,120,115 | 10 | 2 | null | null | null | null | UTF-8 | Python | false | false | 30,679 | py | """Compute Linearly constrained minimum variance (LCMV) beamformer.
"""
# Authors: Alexandre Gramfort <[email protected]>
# Roman Goj <[email protected]>
#
# License: BSD (3-clause)
import numpy as np
from scipy import linalg
from ..io.constants import FIFF
from ..io.proj import make_projector
from ..io.pick import (
pick_types, pick_channels_forward, pick_channels_cov, pick_info)
from ..forward import _subject_from_forward
from ..minimum_norm.inverse import _get_vertno, combine_xyz, _check_reference
from ..cov import compute_whitener, compute_covariance
from ..source_estimate import _make_stc, SourceEstimate
from ..source_space import label_src_vertno_sel
from ..utils import logger, verbose, warn
from .. import Epochs
from ..externals import six
def _setup_picks(picks, info, forward, noise_cov=None):
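    """Return indices of data channels shared by ``info``, the forward
    solution and (if given) the noise covariance, excluding bad channels."""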
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
ok_ch_names = set([c['ch_name'] for c in forward['info']['chs']])
if noise_cov is not None:
        ok_ch_names = ok_ch_names.union(set(noise_cov.ch_names))
if noise_cov is not None and set(info['bads']) != set(noise_cov['bads']):
logger.info('info["bads"] and noise_cov["bads"] do not match, '
'excluding bad channels from both')
bads = set(info['bads'])
if noise_cov is not None:
            bads = bads.union(set(noise_cov['bads']))
ok_ch_names -= bads
ch_names = [info['chs'][k]['ch_name'] for k in picks]
ch_names = [c for c in ch_names if c in ok_ch_names]
picks = [info['ch_names'].index(k) for k in ch_names if k in
info['ch_names']]
return picks
@verbose
def _apply_lcmv(data, info, tmin, forward, noise_cov, data_cov, reg,
label=None, picks=None, pick_ori=None, rank=None,
verbose=None):
""" LCMV beamformer for evoked data, single epochs, and raw data
Parameters
----------
data : array or list / iterable
Sensor space data. If data.ndim == 2 a single observation is assumed
and a single stc is returned. If data.ndim == 3 or if data is
a list / iterable, a list of stc's is returned.
info : dict
Measurement info.
tmin : float
Time of first sample.
forward : dict
Forward operator.
noise_cov : Covariance
The noise covariance.
data_cov : Covariance
The data covariance.
reg : float
The regularization for the whitened data covariance.
label : Label
Restricts the LCMV solution to a given label.
picks : array-like of int | None
Indices (in info) of data channels. If None, MEG and EEG data channels
(without bad channels) will be used.
pick_ori : None | 'normal' | 'max-power'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept. If 'max-power', the source
orientation that maximizes output source power is chosen.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc : SourceEstimate | VolSourceEstimate (or list of thereof)
Source time courses.
"""
is_free_ori, ch_names, proj, vertno, G = \
_prepare_beamformer_input(info, forward, label, picks, pick_ori)
# Handle whitening + data covariance
whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
# whiten the leadfield
G = np.dot(whitener, G)
# Apply SSPs + whitener to data covariance
data_cov = pick_channels_cov(data_cov, include=ch_names)
Cm = data_cov['data']
if info['projs']:
Cm = np.dot(proj, np.dot(Cm, proj.T))
Cm = np.dot(whitener, np.dot(Cm, whitener.T))
# Calculating regularized inverse, equivalent to an inverse operation after
# the following regularization:
# Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
Cm_inv = linalg.pinv(Cm, reg)
# Compute spatial filters
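    # Editor's note: for source k the classic LCMV weights are
    #   W_k = (G_k^T Cm^{-1} G_k)^{-1} G_k^T Cm^{-1};
    # G^T Cm^{-1} is computed once here, and the per-source normalization by
    # (W_k G_k)^{-1} happens inside the loop below.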
W = np.dot(G.T, Cm_inv)
n_orient = 3 if is_free_ori else 1
n_sources = G.shape[1] // n_orient
for k in range(n_sources):
Wk = W[n_orient * k: n_orient * k + n_orient]
Gk = G[:, n_orient * k: n_orient * k + n_orient]
Ck = np.dot(Wk, Gk)
# Find source orientation maximizing output source power
if pick_ori == 'max-power':
eig_vals, eig_vecs = linalg.eigh(Ck)
# Choosing the eigenvector associated with the middle eigenvalue.
# The middle and not the minimal eigenvalue is used because MEG is
# insensitive to one (radial) of the three dipole orientations and
# therefore the smallest eigenvalue reflects mostly noise.
for i in range(3):
if i != eig_vals.argmax() and i != eig_vals.argmin():
idx_middle = i
# TODO: The eigenvector associated with the smallest eigenvalue
# should probably be used when using combined EEG and MEG data
max_ori = eig_vecs[:, idx_middle]
Wk[:] = np.dot(max_ori, Wk)
Ck = np.dot(max_ori, np.dot(Ck, max_ori))
is_free_ori = False
if is_free_ori:
# Free source orientation
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
Wk /= Ck
# Pick source orientation maximizing output source power
if pick_ori == 'max-power':
W = W[0::3]
# Preparing noise normalization
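    # Editor's note: np.sum(W ** 2, axis=1) is diag(W W^T), i.e. each filter's
    # output power for spatially white (already whitened) noise; dividing the
    # weights by its square root yields unit-noise-gain scaling.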
noise_norm = np.sum(W ** 2, axis=1)
if is_free_ori:
noise_norm = np.sum(np.reshape(noise_norm, (-1, 3)), axis=1)
noise_norm = np.sqrt(noise_norm)
# Pick source orientation normal to cortical surface
if pick_ori == 'normal':
W = W[2::3]
is_free_ori = False
# Applying noise normalization
if not is_free_ori:
W /= noise_norm[:, None]
if isinstance(data, np.ndarray) and data.ndim == 2:
data = [data]
return_single = True
else:
return_single = False
subject = _subject_from_forward(forward)
for i, M in enumerate(data):
if len(M) != len(picks):
raise ValueError('data and picks must have the same length')
if not return_single:
logger.info("Processing epoch : %d" % (i + 1))
# SSP and whitening
if info['projs']:
M = np.dot(proj, M)
M = np.dot(whitener, M)
# project to source space using beamformer weights
if is_free_ori:
sol = np.dot(W, M)
logger.info('combining the current components...')
sol = combine_xyz(sol)
sol /= noise_norm[:, None]
else:
# Linear inverse: do computation here or delayed
if M.shape[0] < W.shape[0] and pick_ori != 'max-power':
sol = (W, M)
else:
sol = np.dot(W, M)
if pick_ori == 'max-power':
sol = np.abs(sol)
tstep = 1.0 / info['sfreq']
yield _make_stc(sol, vertices=vertno, tmin=tmin, tstep=tstep,
subject=subject)
logger.info('[done]')
def _prepare_beamformer_input(info, forward, label, picks, pick_ori):
"""Input preparation common for all beamformer functions.
Check input values, prepare channel list and gain matrix. For documentation
of parameters, please refer to _apply_lcmv.
"""
is_free_ori = forward['source_ori'] == FIFF.FIFFV_MNE_FREE_ORI
if pick_ori in ['normal', 'max-power'] and not is_free_ori:
raise ValueError('Normal or max-power orientation can only be picked '
'when a forward operator with free orientation is '
'used.')
if pick_ori == 'normal' and not forward['surf_ori']:
raise ValueError('Normal orientation can only be picked when a '
'forward operator oriented in surface coordinates is '
'used.')
if pick_ori == 'normal' and not forward['src'][0]['type'] == 'surf':
raise ValueError('Normal orientation can only be picked when a '
'forward operator with a surface-based source space '
'is used.')
# Restrict forward solution to selected channels
info_ch_names = [c['ch_name'] for c in info['chs']]
ch_names = [info_ch_names[k] for k in picks]
fwd_ch_names = forward['sol']['row_names']
# Keep channels in forward present in info:
fwd_ch_names = [c for c in fwd_ch_names if c in info_ch_names]
forward = pick_channels_forward(forward, fwd_ch_names)
picks_forward = [fwd_ch_names.index(c) for c in ch_names]
# Get gain matrix (forward operator)
if label is not None:
vertno, src_sel = label_src_vertno_sel(label, forward['src'])
if is_free_ori:
src_sel = 3 * src_sel
src_sel = np.c_[src_sel, src_sel + 1, src_sel + 2]
src_sel = src_sel.ravel()
G = forward['sol']['data'][:, src_sel]
else:
vertno = _get_vertno(forward['src'])
G = forward['sol']['data']
# Apply SSPs
proj, ncomp, _ = make_projector(info['projs'], fwd_ch_names)
if info['projs']:
G = np.dot(proj, G)
# Pick after applying the projections
G = G[picks_forward]
proj = proj[np.ix_(picks_forward, picks_forward)]
return is_free_ori, ch_names, proj, vertno, G
@verbose
def lcmv(evoked, forward, noise_cov, data_cov, reg=0.01, label=None,
pick_ori=None, picks=None, rank=None, verbose=None):
"""Linearly Constrained Minimum Variance (LCMV) beamformer.
Compute Linearly Constrained Minimum Variance (LCMV) beamformer
on evoked data.
NOTE : This implementation has not been heavily tested so please
report any issue or suggestions.
Parameters
----------
evoked : Evoked
Evoked data to invert
forward : dict
Forward operator
noise_cov : Covariance
The noise covariance
data_cov : Covariance
The data covariance
reg : float
The regularization for the whitened data covariance.
label : Label
Restricts the LCMV solution to a given label
pick_ori : None | 'normal' | 'max-power'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept. If 'max-power', the source
orientation that maximizes output source power is chosen.
picks : array-like of int
Channel indices to use for beamforming (if None all channels
are used except bad channels).
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc : SourceEstimate | VolSourceEstimate
Source time courses
See Also
--------
lcmv_raw, lcmv_epochs
Notes
-----
The original reference is:
Van Veen et al. Localization of brain electrical activity via linearly
constrained minimum variance spatial filtering.
Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
The reference for finding the max-power orientation is:
Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
beamformers for neuromagnetic source reconstruction.
Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
"""
_check_reference(evoked)
info = evoked.info
data = evoked.data
tmin = evoked.times[0]
picks = _setup_picks(picks, info, forward, noise_cov)
data = data[picks]
stc = _apply_lcmv(
data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
pick_ori=pick_ori)
return six.advance_iterator(stc)
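# Editor's sketch (commented out; the file names and the `epochs` object are
# hypothetical placeholders, not fixtures shipped with this module):
# evoked = mne.read_evokeds('sample-ave.fif', condition=0)
# forward = mne.read_forward_solution('sample-fwd.fif')
# noise_cov = mne.read_cov('sample-noise-cov.fif')
# data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15)
# stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.05,
#            pick_ori='max-power')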
@verbose
def lcmv_epochs(epochs, forward, noise_cov, data_cov, reg=0.01, label=None,
pick_ori=None, return_generator=False, picks=None, rank=None,
verbose=None):
"""Linearly Constrained Minimum Variance (LCMV) beamformer.
Compute Linearly Constrained Minimum Variance (LCMV) beamformer
on single trial data.
NOTE : This implementation has not been heavily tested so please
report any issue or suggestions.
Parameters
----------
epochs : Epochs
Single trial epochs.
forward : dict
Forward operator.
noise_cov : Covariance
The noise covariance.
data_cov : Covariance
The data covariance.
reg : float
The regularization for the whitened data covariance.
label : Label
Restricts the LCMV solution to a given label.
pick_ori : None | 'normal' | 'max-power'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept. If 'max-power', the source
orientation that maximizes output source power is chosen.
return_generator : bool
Return a generator object instead of a list. This allows iterating
over the stcs without having to keep them all in memory.
picks : array-like of int
Channel indices to use for beamforming (if None all channels
are used except bad channels).
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc: list | generator of (SourceEstimate | VolSourceEstimate)
The source estimates for all epochs
See Also
--------
lcmv_raw, lcmv
Notes
-----
The original reference is:
Van Veen et al. Localization of brain electrical activity via linearly
constrained minimum variance spatial filtering.
Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
The reference for finding the max-power orientation is:
Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
beamformers for neuromagnetic source reconstruction.
Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
"""
_check_reference(epochs)
info = epochs.info
tmin = epochs.times[0]
picks = _setup_picks(picks, info, forward, noise_cov)
data = epochs.get_data()[:, picks, :]
stcs = _apply_lcmv(
data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
pick_ori=pick_ori)
if not return_generator:
stcs = [s for s in stcs]
return stcs
@verbose
def lcmv_raw(raw, forward, noise_cov, data_cov, reg=0.01, label=None,
start=None, stop=None, picks=None, pick_ori=None, rank=None,
verbose=None):
"""Linearly Constrained Minimum Variance (LCMV) beamformer.
Compute Linearly Constrained Minimum Variance (LCMV) beamformer
on raw data.
NOTE : This implementation has not been heavily tested so please
report any issue or suggestions.
Parameters
----------
raw : mne.io.Raw
Raw data to invert.
forward : dict
Forward operator.
noise_cov : Covariance
The noise covariance.
data_cov : Covariance
The data covariance.
reg : float
The regularization for the whitened data covariance.
label : Label
Restricts the LCMV solution to a given label.
start : int
Index of first time sample (index not time is seconds).
stop : int
Index of first time sample not to include (index not time is seconds).
picks : array-like of int
Channel indices to use for beamforming (if None all channels
are used except bad channels).
pick_ori : None | 'normal' | 'max-power'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept. If 'max-power', the source
orientation that maximizes output source power is chosen.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc : SourceEstimate | VolSourceEstimate
Source time courses
See Also
--------
lcmv, lcmv_epochs
Notes
-----
The original reference is:
Van Veen et al. Localization of brain electrical activity via linearly
constrained minimum variance spatial filtering.
Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
The reference for finding the max-power orientation is:
Sekihara et al. Asymptotic SNR of scalar and vector minimum-variance
beamformers for neuromagnetic source reconstruction.
Biomedical Engineering (2004) vol. 51 (10) pp. 1726--34
"""
_check_reference(raw)
info = raw.info
picks = _setup_picks(picks, info, forward, noise_cov)
data, times = raw[picks, start:stop]
tmin = times[0]
stc = _apply_lcmv(
data=data, info=info, tmin=tmin, forward=forward, noise_cov=noise_cov,
data_cov=data_cov, reg=reg, label=label, picks=picks, rank=rank,
pick_ori=pick_ori)
return six.advance_iterator(stc)
@verbose
def _lcmv_source_power(info, forward, noise_cov, data_cov, reg=0.01,
label=None, picks=None, pick_ori=None,
rank=None, verbose=None):
"""Linearly Constrained Minimum Variance (LCMV) beamformer.
Calculate source power in a time window based on the provided data
covariance. Noise covariance is used to whiten the data covariance making
the output equivalent to the neural activity index as defined by
Van Veen et al. 1997.
NOTE : This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
info : dict
Measurement info, e.g. epochs.info.
forward : dict
Forward operator.
noise_cov : Covariance
The noise covariance.
data_cov : Covariance
The data covariance.
reg : float
The regularization for the whitened data covariance.
label : Label | None
Restricts the solution to a given label.
picks : array-like of int | None
Indices (in info) of data channels. If None, MEG and EEG data channels
(without bad channels) will be used.
pick_ori : None | 'normal'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept.
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stc : SourceEstimate
Source power with a single time point representing the entire time
window for which data covariance was calculated.
Notes
-----
The original reference is:
Van Veen et al. Localization of brain electrical activity via linearly
constrained minimum variance spatial filtering.
Biomedical Engineering (1997) vol. 44 (9) pp. 867--880
"""
if picks is None:
picks = pick_types(info, meg=True, eeg=True, ref_meg=False,
exclude='bads')
is_free_ori, ch_names, proj, vertno, G =\
_prepare_beamformer_input(
info, forward, label, picks, pick_ori)
# Handle whitening
info = pick_info(
info, [info['ch_names'].index(k) for k in ch_names
if k in info['ch_names']])
whitener, _ = compute_whitener(noise_cov, info, picks, rank=rank)
# whiten the leadfield
G = np.dot(whitener, G)
# Apply SSPs + whitener to data covariance
data_cov = pick_channels_cov(data_cov, include=ch_names)
Cm = data_cov['data']
if info['projs']:
Cm = np.dot(proj, np.dot(Cm, proj.T))
Cm = np.dot(whitener, np.dot(Cm, whitener.T))
# Calculating regularized inverse, equivalent to an inverse operation after
# the following regularization:
# Cm += reg * np.trace(Cm) / len(Cm) * np.eye(len(Cm))
Cm_inv = linalg.pinv(Cm, reg)
# Compute spatial filters
W = np.dot(G.T, Cm_inv)
n_orient = 3 if is_free_ori else 1
n_sources = G.shape[1] // n_orient
source_power = np.zeros((n_sources, 1))
for k in range(n_sources):
Wk = W[n_orient * k: n_orient * k + n_orient]
Gk = G[:, n_orient * k: n_orient * k + n_orient]
Ck = np.dot(Wk, Gk)
if is_free_ori:
# Free source orientation
Wk[:] = np.dot(linalg.pinv(Ck, 0.1), Wk)
else:
# Fixed source orientation
Wk /= Ck
# Noise normalization
noise_norm = np.dot(Wk, Wk.T)
noise_norm = noise_norm.trace()
# Calculating source power
sp_temp = np.dot(np.dot(Wk, Cm), Wk.T)
sp_temp /= max(noise_norm, 1e-40) # Avoid division by 0
if pick_ori == 'normal':
source_power[k, 0] = sp_temp[2, 2]
else:
source_power[k, 0] = sp_temp.trace()
logger.info('[done]')
subject = _subject_from_forward(forward)
return SourceEstimate(source_power, vertices=vertno, tmin=1,
tstep=1, subject=subject)
@verbose
def tf_lcmv(epochs, forward, noise_covs, tmin, tmax, tstep, win_lengths,
freq_bins, subtract_evoked=False, reg=0.01, label=None,
pick_ori=None, n_jobs=1, picks=None, rank=None, verbose=None):
"""5D time-frequency beamforming based on LCMV.
Calculate source power in time-frequency windows using a spatial filter
based on the Linearly Constrained Minimum Variance (LCMV) beamforming
approach. Band-pass filtered epochs are divided into time windows from
which covariance is computed and used to create a beamformer spatial
filter.
NOTE : This implementation has not been heavily tested so please
report any issues or suggestions.
Parameters
----------
epochs : Epochs
Single trial epochs.
forward : dict
Forward operator.
noise_covs : list of instances of Covariance
Noise covariance for each frequency bin.
tmin : float
Minimum time instant to consider.
tmax : float
Maximum time instant to consider.
tstep : float
Spacing between consecutive time windows, should be smaller than or
equal to the shortest time window length.
win_lengths : list of float
Time window lengths in seconds. One time window length should be
provided for each frequency bin.
freq_bins : list of tuples of float
Start and end point of frequency bins of interest.
subtract_evoked : bool
If True, subtract the averaged evoked response prior to computing the
tf source grid.
reg : float
The regularization for the whitened data covariance.
label : Label | None
Restricts the solution to a given label.
pick_ori : None | 'normal'
If 'normal', rather than pooling the orientations by taking the norm,
only the radial component is kept.
n_jobs : int | str
Number of jobs to run in parallel. Can be 'cuda' if scikits.cuda
is installed properly and CUDA is initialized.
picks : array-like of int
Channel indices to use for beamforming (if None all channels
are used except bad channels).
rank : None | int | dict
Specified rank of the noise covariance matrix. If None, the rank is
detected automatically. If int, the rank is specified for the MEG
channels. A dictionary with entries 'eeg' and/or 'meg' can be used
to specify the rank for each modality.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
stcs : list of SourceEstimate
Source power at each time window. One SourceEstimate object is returned
for each frequency bin.
Notes
-----
The original reference is:
Dalal et al. Five-dimensional neuroimaging: Localization of the
time-frequency dynamics of cortical activity.
NeuroImage (2008) vol. 40 (4) pp. 1686-1700
"""
_check_reference(epochs)
if pick_ori not in [None, 'normal']:
raise ValueError('Unrecognized orientation option in pick_ori, '
'available choices are None and normal')
if len(noise_covs) != len(freq_bins):
raise ValueError('One noise covariance object expected per frequency '
'bin')
if len(win_lengths) != len(freq_bins):
raise ValueError('One time window length expected per frequency bin')
if any(win_length < tstep for win_length in win_lengths):
raise ValueError('Time step should not be larger than any of the '
'window lengths')
# Extract raw object from the epochs object
raw = epochs._raw
if raw is None:
raise ValueError('The provided epochs object does not contain the '
'underlying raw object. Please use preload=False '
'when constructing the epochs object')
picks = _setup_picks(picks, epochs.info, forward, noise_covs[0])
ch_names = [epochs.ch_names[k] for k in picks]
# Use picks from epochs for picking channels in the raw object
raw_picks = [raw.ch_names.index(c) for c in ch_names]
# Make sure epochs.events contains only good events:
epochs.drop_bad()
# Multiplying by 1e3 to avoid numerical issues, e.g. 0.3 // 0.05 == 5
n_time_steps = int(((tmax - tmin) * 1e3) // (tstep * 1e3))
sol_final = []
for (l_freq, h_freq), win_length, noise_cov in \
zip(freq_bins, win_lengths, noise_covs):
n_overlap = int((win_length * 1e3) // (tstep * 1e3))
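        # n_overlap is how many consecutive sliding windows cover any given
        # time point; it drives the running average over window-wise source
        # power estimates further below.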
raw_band = raw.copy()
raw_band.filter(l_freq, h_freq, picks=raw_picks, method='iir',
n_jobs=n_jobs)
raw_band.info['highpass'] = l_freq
raw_band.info['lowpass'] = h_freq
epochs_band = Epochs(raw_band, epochs.events, epochs.event_id,
tmin=epochs.tmin, tmax=epochs.tmax, baseline=None,
picks=raw_picks, proj=epochs.proj, preload=True)
del raw_band
if subtract_evoked:
epochs_band.subtract_evoked()
sol_single = []
sol_overlap = []
for i_time in range(n_time_steps):
win_tmin = tmin + i_time * tstep
win_tmax = win_tmin + win_length
# If in the last step the last time point was not covered in
# previous steps and will not be covered now, a solution needs to
# be calculated for an additional time window
if i_time == n_time_steps - 1 and win_tmax - tstep < tmax and\
win_tmax >= tmax + (epochs.times[-1] - epochs.times[-2]):
warn('Adding a time window to cover last time points')
win_tmin = tmax - win_length
win_tmax = tmax
if win_tmax < tmax + (epochs.times[-1] - epochs.times[-2]):
logger.info('Computing time-frequency LCMV beamformer for '
'time window %d to %d ms, in frequency range '
'%d to %d Hz' % (win_tmin * 1e3, win_tmax * 1e3,
l_freq, h_freq))
# Counteracts unsafe floating point arithmetic ensuring all
# relevant samples will be taken into account when selecting
# data in time windows
win_tmin = win_tmin - 1e-10
win_tmax = win_tmax + 1e-10
# Calculating data covariance from filtered epochs in current
# time window
data_cov = compute_covariance(epochs_band, tmin=win_tmin,
tmax=win_tmax)
stc = _lcmv_source_power(epochs_band.info, forward, noise_cov,
data_cov, reg=reg, label=label,
pick_ori=pick_ori, verbose=verbose)
sol_single.append(stc.data[:, 0])
# Average over all time windows that contain the current time
# point, which is the current time window along with
# n_overlap - 1 previous ones
if i_time - n_overlap < 0:
curr_sol = np.mean(sol_single[0:i_time + 1], axis=0)
else:
curr_sol = np.mean(sol_single[i_time - n_overlap + 1:
i_time + 1], axis=0)
# The final result for the current time point in the current
# frequency bin
sol_overlap.append(curr_sol)
# Gathering solutions for all time points for current frequency bin
sol_final.append(sol_overlap)
sol_final = np.array(sol_final)
# Creating stc objects containing all time points for each frequency bin
stcs = []
for i_freq, _ in enumerate(freq_bins):
stc = SourceEstimate(sol_final[i_freq, :, :].T, vertices=stc.vertices,
tmin=tmin, tstep=tstep, subject=stc.subject)
stcs.append(stc)
return stcs
| [
"[email protected]"
] | |
878f78437dc5e1bec4b5c66bd1443295fcebfb4e | bcf678908eb3e26f6172265406bfaaa7129f6b18 | /Blog/myapp/views.py | 64d2228c228fc572bf4ed3eb5100262b4f3071d9 | [] | no_license | loganjoon/0713-Blog | 935cbd75c8682ff6bc6841bc414ad0db3225a917 | 71494795515753b6a354e1b93ed57858e852a4a5 | refs/heads/master | 2022-11-17T00:09:40.770351 | 2020-07-13T02:31:34 | 2020-07-13T02:31:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | from django.shortcuts import render
from .models import BlogFrame
from django.shortcuts import render, get_object_or_404
def main(request):
blogs = BlogFrame.objects
return render(request, 'main.html',{'blogs':blogs})
def detail(request, blog_id):
blog_detail = get_object_or_404(BlogFrame, pk=blog_id)
return render(request, '/detail.html', {'blogdetail': blog_detail})
# Create your views here.
| [
"[email protected]"
] | |
5649862f39c4121adba3f3cf54160b5251b6ff8e | 242da8865e037f9fffb76269c3acddb73ce9fa14 | /packages/pyright-internal/src/tests/samples/forLoop1.py | 6f5ced2b691c9f7c57d066e0809a9261e765695a | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | khyveasna11111908/pyright | f42eceae044f6fbc27552c1765b03ebd345a451c | 493d47807b96137995e4bb6ca341930e4de911f9 | refs/heads/main | 2023-08-30T00:08:36.191799 | 2021-09-25T19:17:13 | 2021-09-25T19:17:13 | 410,361,483 | 1 | 1 | NOASSERTION | 2021-09-25T19:15:23 | 2021-09-25T19:15:22 | null | UTF-8 | Python | false | false | 1,185 | py | # This sample tests 'for' operations (both simple for loops
# and list comprehension for loops).
from typing import AsyncIterator, List, Iterator
def requires_int(val: int):
pass
list1 = [1, 2, 3] # type: List[int]
for a in list1:
requires_int(a)
int1 = 1
# This should generate an error because
# an int type is not iterable.
for foo1 in int1:
pass
async def func1():
# This should generate an error because
# list1 isn't an async iterator.
async for foo2 in list1:
requires_int(foo2)
class AsyncIterable1(object):
def __aiter__(self):
return self
async def __anext__(self):
return 1
iter1 = AsyncIterable1()
async def func2():
async for foo3 in iter1:
requires_int(foo3)
for d in [b for b in list1]:
requires_int(d)
for e in [b async for b in iter1]:
requires_int(e)
class ClassWithGetItem(object):
def __getitem__(self, item) -> str:
return "hello"
def testGetItemIterator() -> str:
objWithGetItem = ClassWithGetItem()
for f in objWithGetItem:
return f
return "none"
# This should generate a syntax error.
for in range(3):
pass
| [
"[email protected]"
] | |
5348b105b39d20eb47abfa9721d17ff45cc83590 | d1aa6e7d5631d7806531660febbd1f856eaeece7 | /python/paddle/distribution/normal.py | 8a9e5cd7372a7ef98548986242a286f6f14efc4c | [
"Apache-2.0"
] | permissive | gongweibao/Paddle | 510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77 | 60f9c60cd8196c66c391d79c35d341e9072f8838 | refs/heads/develop | 2023-03-13T17:43:35.675875 | 2022-09-20T08:46:15 | 2022-09-20T08:46:15 | 82,279,237 | 3 | 2 | Apache-2.0 | 2021-05-26T06:17:43 | 2017-02-17T09:16:16 | Python | UTF-8 | Python | false | false | 10,689 | py | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import warnings
import numpy as np
from paddle import _C_ops, _legacy_C_ops
from paddle.distribution import distribution
from paddle.fluid import core
from paddle.fluid.data_feeder import (check_dtype, check_type,
check_variable_and_dtype, convert_dtype)
from paddle.fluid.framework import _non_static_mode, in_dygraph_mode
from paddle.fluid.layers import (control_flow, elementwise_add, elementwise_div,
elementwise_mul, elementwise_sub, nn, ops,
tensor)
class Normal(distribution.Distribution):
r"""The Normal distribution with location `loc` and `scale` parameters.
Mathematical details
The probability density function (pdf) is
.. math::
pdf(x; \mu, \sigma) = \\frac{1}{Z}e^{\\frac {-0.5 (x - \mu)^2} {\sigma^2} }
.. math::
Z = (2 \pi \sigma^2)^{0.5}
In the above equation:
* :math:`loc = \mu`: is the mean.
* :math:`scale = \sigma`: is the std.
* :math:`Z`: is the normalization constant.
Args:
loc(int|float|list|tuple|numpy.ndarray|Tensor): The mean of normal distribution.The data type is int, float, list, numpy.ndarray or Tensor.
scale(int|float|list|tuple|numpy.ndarray|Tensor): The std of normal distribution.The data type is int, float, list, numpy.ndarray or Tensor.
name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
Examples:
.. code-block:: python
import paddle
from paddle.distribution import Normal
# Define a single scalar Normal distribution.
dist = Normal(loc=0., scale=3.)
# Define a batch of two scalar valued Normals.
# The first has mean 1 and standard deviation 11, the second 2 and 22.
dist = Normal(loc=[1., 2.], scale=[11., 22.])
# Get 3 samples, returning a 3 x 2 tensor.
dist.sample([3])
# Define a batch of two scalar valued Normals.
# Both have mean 1, but different standard deviations.
dist = Normal(loc=1., scale=[11., 22.])
# Complete example
value_tensor = paddle.to_tensor([0.8], dtype="float32")
normal_a = Normal([0.], [1.])
normal_b = Normal([0.5], [2.])
sample = normal_a.sample([2])
# a random tensor created by normal distribution with shape: [2, 1]
entropy = normal_a.entropy()
# [1.4189385] with shape: [1]
lp = normal_a.log_prob(value_tensor)
# [-1.2389386] with shape: [1]
p = normal_a.probs(value_tensor)
# [0.28969154] with shape: [1]
kl = normal_a.kl_divergence(normal_b)
# [0.34939718] with shape: [1]
"""
def __init__(self, loc, scale, name=None):
if not _non_static_mode():
check_type(loc, 'loc',
(int, float, np.ndarray, tensor.Variable, list, tuple),
'Normal')
check_type(scale, 'scale',
(int, float, np.ndarray, tensor.Variable, list, tuple),
'Normal')
self.batch_size_unknown = False
self.all_arg_is_float = False
self.name = name if name is not None else 'Normal'
self.dtype = 'float32'
if isinstance(loc, int):
loc = float(loc)
if isinstance(scale, int):
scale = float(scale)
if self._validate_args(loc, scale):
self.batch_size_unknown = True
self.loc = loc
self.scale = scale
self.dtype = convert_dtype(loc.dtype)
else:
if isinstance(loc, float) and isinstance(scale, float):
self.all_arg_is_float = True
if isinstance(loc, np.ndarray) and str(
loc.dtype) in ['float32', 'float64']:
self.dtype = loc.dtype
elif isinstance(scale, np.ndarray) and str(
scale.dtype) in ['float32', 'float64']:
self.dtype = scale.dtype
# pylint: disable=unbalanced-tuple-unpacking
self.loc, self.scale = self._to_tensor(loc, scale)
if self.dtype != convert_dtype(self.loc.dtype):
self.loc = tensor.cast(self.loc, dtype=self.dtype)
self.scale = tensor.cast(self.scale, dtype=self.dtype)
super(Normal, self).__init__(self.loc.shape)
def sample(self, shape, seed=0):
"""Generate samples of the specified shape.
Args:
shape (list): 1D `int32`. Shape of the generated samples.
seed (int): Python integer number.
Returns:
Tensor: A tensor with prepended dimensions shape.The data type is float32.
"""
if not _non_static_mode():
check_type(shape, 'shape', (list), 'sample')
check_type(seed, 'seed', (int), 'sample')
batch_shape = list((self.loc + self.scale).shape)
name = self.name + '_sample'
if self.batch_size_unknown:
output_shape = shape + batch_shape
zero_tmp = tensor.fill_constant_batch_size_like(
self.loc + self.scale, batch_shape + shape, self.dtype, 0.)
zero_tmp_reshape = nn.reshape(zero_tmp, output_shape)
zero_tmp_shape = nn.shape(zero_tmp_reshape)
normal_random_tmp = nn.gaussian_random(zero_tmp_shape,
mean=0.,
std=1.,
seed=seed,
dtype=self.dtype)
output = normal_random_tmp * (zero_tmp_reshape + self.scale)
output = elementwise_add(output, self.loc, name=name)
return output
else:
output_shape = shape + batch_shape
output = nn.gaussian_random(output_shape, mean=0., std=1., seed=seed, dtype=self.dtype) * \
(tensor.zeros(output_shape, dtype=self.dtype) + self.scale)
output = elementwise_add(output, self.loc, name=name)
if self.all_arg_is_float:
return nn.reshape(output, shape, name=name)
else:
return output
def entropy(self):
r"""Shannon entropy in nats.
The entropy is
.. math::
entropy(\sigma) = 0.5 \\log (2 \pi e \sigma^2)
In the above equation:
* :math:`scale = \sigma`: is the std.
Returns:
Tensor: Shannon entropy of normal distribution.The data type is float32.
"""
name = self.name + '_entropy'
batch_shape = list((self.loc + self.scale).shape)
zero_tmp = tensor.fill_constant_batch_size_like(self.loc + self.scale,
batch_shape, self.dtype,
0.)
return elementwise_add(0.5 + zero_tmp,
0.5 * math.log(2 * math.pi) + nn.log(
(self.scale + zero_tmp)),
name=name)
def log_prob(self, value):
"""Log probability density/mass function.
Args:
value (Tensor): The input tensor.
Returns:
Tensor: log probability.The data type is same with value.
"""
name = self.name + '_log_prob'
value = self._check_values_dtype_in_probs(self.loc, value)
var = self.scale * self.scale
log_scale = nn.log(self.scale)
return elementwise_sub(-1. * ((value - self.loc) * (value - self.loc)) /
(2. * var),
log_scale + math.log(math.sqrt(2. * math.pi)),
name=name)
def probs(self, value):
"""Probability density/mass function.
Args:
value (Tensor): The input tensor.
Returns:
Tensor: probability.The data type is same with value.
"""
name = self.name + '_probs'
value = self._check_values_dtype_in_probs(self.loc, value)
var = self.scale * self.scale
return elementwise_div(ops.exp(-1. * ((value - self.loc) *
(value - self.loc)) / (2. * var)),
(math.sqrt(2 * math.pi) * self.scale),
name=name)
def kl_divergence(self, other):
r"""The KL-divergence between two normal distributions.
The probability density function (pdf) is
.. math::
KL\_divergence(\mu_0, \sigma_0; \mu_1, \sigma_1) = 0.5 (ratio^2 + (\\frac{diff}{\sigma_1})^2 - 1 - 2 \\ln {ratio})
.. math::
ratio = \\frac{\sigma_0}{\sigma_1}
.. math::
diff = \mu_1 - \mu_0
In the above equation:
* :math:`loc = \mu_0`: is the mean of current Normal distribution.
* :math:`scale = \sigma_0`: is the std of current Normal distribution.
* :math:`loc = \mu_1`: is the mean of other Normal distribution.
* :math:`scale = \sigma_1`: is the std of other Normal distribution.
* :math:`ratio`: is the ratio of scales.
* :math:`diff`: is the difference between means.
Args:
other (Normal): instance of Normal.
Returns:
Tensor: kl-divergence between two normal distributions.The data type is float32.
"""
if not _non_static_mode():
check_type(other, 'other', Normal, 'kl_divergence')
name = self.name + '_kl_divergence'
var_ratio = self.scale / other.scale
var_ratio = (var_ratio * var_ratio)
t1 = (self.loc - other.loc) / other.scale
t1 = (t1 * t1)
return elementwise_add(0.5 * var_ratio,
0.5 * (t1 - 1. - nn.log(var_ratio)),
name=name)
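# ---------------------------------------------------------------------------
# Illustrative cross-check (added for exposition; not part of the original
# Paddle source): verify the closed-form KL above with plain NumPy, using the
# same parameters as the docstring example, N(0, 1) vs N(0.5, 2).
if __name__ == '__main__':
    import numpy as _np
    _mu0, _s0, _mu1, _s1 = 0.0, 1.0, 0.5, 2.0
    _var_ratio = (_s0 / _s1) ** 2
    _t1 = ((_mu0 - _mu1) / _s1) ** 2
    _kl_closed = 0.5 * _var_ratio + 0.5 * (_t1 - 1.0 - _np.log(_var_ratio))
    _x = _np.random.normal(_mu0, _s0, size=1000000)
    _log_p = -0.5 * ((_x - _mu0) / _s0) ** 2 - _np.log(_s0 * _np.sqrt(2 * _np.pi))
    _log_q = -0.5 * ((_x - _mu1) / _s1) ** 2 - _np.log(_s1 * _np.sqrt(2 * _np.pi))
    print(_kl_closed, (_log_p - _log_q).mean())  # both should be near 0.34939718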
| [
"[email protected]"
] | |
484234961357522c403302d254ccabdc4df0e383 | f3f10bb0ec28489d3111c72ce9811b01fa629d64 | /setup.py | ada05e6131d7ef1e7ee185a5fae1c8a5dfe88d3b | [
"BSD-2-Clause"
] | permissive | gitter-badger/labscript | db0e6f1a0c49a78f6dc08efea8607bce499a26a4 | 26f68923c71a56d84e19ae2ab894d2f4d6bdd9b4 | refs/heads/master | 2022-04-26T02:40:36.586340 | 2020-05-02T17:33:28 | 2020-05-02T17:33:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,497 | py | # USAGE NOTES
#
# Make a PyPI release tarball with:
#
# python setup.py sdist
#
# Upload to test PyPI with:
#
# twine upload --repository-url https://test.pypi.org/legacy/ dist/*
#
# Install from test PyPI with:
#
# pip install --index-url https://test.pypi.org/simple/ labscript
#
# Upload to real PyPI with:
#
# twine upload dist/*
#
# Build conda packages for all platforms (in a conda environment with setuptools_conda
# installed) with:
#
# python setup.py dist_conda
#
# Upload to your own account (for testing) on anaconda cloud (in a conda environment with
# anaconda-client installed) with:
#
# anaconda upload --skip-existing conda_packages/*/*
#
# (Trickier on Windows, as it won't expand the wildcards)
#
# Upload to the labscript-suite organisation's channel on anaconda cloud (in a
# conda environment with anaconda-client installed) with:
#
# anaconda upload -u labscript-suite --skip-existing conda_packages/*/*
#
# If you need to rebuild the same version of the package for conda due to a packaging
# issue, you must increment CONDA_BUILD_NUMBER in order to create a unique version on
# anaconda cloud. When subsequently releasing a new version of the package,
# CONDA_BUILD_NUMBER should be reset to zero.
import os
from setuptools import setup
try:
from setuptools_conda import dist_conda
except ImportError:
dist_conda = None
SETUP_REQUIRES = ['setuptools', 'setuptools_scm']
INSTALL_REQUIRES = [
"labscript_utils >=2.14.0",
"numpy >=1.15",
"scipy",
"matplotlib",
]
setup(
name='labscript',
use_scm_version=True,
description="The labscript compiler",
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='The labscript suite community',
    author_email='[email protected]',
url='http://labscriptsuite.org',
license="BSD",
packages=["labscript"],
zip_safe=False,
setup_requires=SETUP_REQUIRES,
include_package_data=True,
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5",
install_requires=INSTALL_REQUIRES if 'CONDA_BUILD' not in os.environ else [],
cmdclass={'dist_conda': dist_conda} if dist_conda is not None else {},
command_options={
'dist_conda': {
'pythons': (__file__, ['3.6', '3.7', '3.8']),
'platforms': (__file__, ['linux-64', 'win-32', 'win-64', 'osx-64']),
'force_conversion': (__file__, True),
},
},
)
| [
"[email protected]"
] | |
efa84b7b252d3f011527c3e5a96bab39d82863ad | c817d8c3daf2ea79dc02a2e624e49c2fd556007d | /audit/models.py | 40eea1974af60239d983e11b9cab78dd9c239773 | [] | no_license | DUMBALINYOLO/stats-filtering | 7a3d1ccd52527031a66946cdb06286a244be0b1f | 64d62f84bcfb465cb8721cdbfbb00fe034ac9893 | refs/heads/master | 2023-03-17T11:09:17.522663 | 2021-03-12T12:01:16 | 2021-03-12T12:01:16 | 347,049,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | from django.db import models
class AuditLog(models.Model):
timestamp = models.DateTimeField(auto_now=True)
user = models.CharField(max_length=50, null=False, blank=False)
user_ip = models.CharField(max_length=100, null=False, blank=False)
action_name = models.CharField(max_length=20, null=False, blank=False)
table_name = models.CharField(max_length=50, null=True, blank=True)
task_name = models.CharField(max_length=50, null=True, blank=True)
action_details = models.CharField(max_length=200, null=True, blank=True)
data = models.TextField(null=True, blank=True)
def __str__(self):
return str(self.timestamp)+'_'+self.user
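# Illustrative usage sketch (assumes a configured Django project with this app
# migrated; the field values below are made up):
#
#   AuditLog.objects.create(
#       user="jdoe", user_ip="10.0.0.5", action_name="UPDATE",
#       table_name="core_order", action_details="status -> shipped",
#       data='{"order_id": 42}',
#   )
#
# `timestamp` needs no argument: auto_now=True fills it in on every save.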
| [
"[email protected]"
] | |
0bce70d10cc3aaf768ca97f81cc8c150bf7dc968 | e5483ab737acd9fb222f0b7d1c770cfdd45d2ba7 | /ecommerce/core/migrations/0019_auto_20200617_1118.py | ac79742a44b8082f258a0b47704601705075a955 | [] | no_license | mxmaslin/otus_web | 6c1e534047444d7a1fc4cd1bf8245c25d9fc4835 | b90ad69e1b5c1828fa2ace165710422d113d1d17 | refs/heads/master | 2022-12-09T19:52:58.626199 | 2020-07-07T19:15:52 | 2020-07-07T19:15:52 | 226,154,128 | 1 | 1 | null | 2022-12-08T03:23:10 | 2019-12-05T17:25:11 | JavaScript | UTF-8 | Python | false | false | 2,545 | py | # Generated by Django 2.2.12 on 2020-06-17 08:18
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0018_coupon_refund_userprofile'),
]
operations = [
migrations.AlterModelOptions(
name='coupon',
options={'verbose_name': 'Купон', 'verbose_name_plural': 'Купоны'},
),
migrations.AlterModelOptions(
name='refund',
options={'verbose_name': 'Возврат', 'verbose_name_plural': 'Возвраты'},
),
migrations.AlterModelOptions(
name='userprofile',
options={'verbose_name': 'Профиль пользователя', 'verbose_name_plural': 'Профили пользователей'},
),
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
migrations.AlterField(
model_name='coupon',
name='amount',
field=models.FloatField(verbose_name='Скидка'),
),
migrations.AlterField(
model_name='coupon',
name='code',
field=models.CharField(max_length=15, verbose_name='Код'),
),
migrations.AlterField(
model_name='refund',
name='accepted',
field=models.BooleanField(default=False, verbose_name='Выполнен'),
),
migrations.AlterField(
model_name='refund',
name='order',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Order', verbose_name='Заказ'),
),
migrations.AlterField(
model_name='refund',
name='reason',
field=models.TextField(verbose_name='Причина'),
),
migrations.AlterField(
model_name='userprofile',
name='one_click_purchasing',
field=models.BooleanField(default=False, verbose_name='Покупка в один клик'),
),
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Пользователь'),
),
]
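# To apply this migration, use the standard Django command: python manage.py migrate core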
| [
"[email protected]"
] | |
cfa5f4341bf4ff6482ca10400733edefed6df658 | f3693916a8b118bf139364604dac3f51235ed613 | /functional/Components/Clients/Clients_GET/test_TC_42892_Clients_GET_Invalid_Page_Size.py | 8e24a8ec90dc24677d7dd10d3374b3b4a548f8b0 | [] | no_license | muktabehera/QE | e7d62284889d8241d22506f6ee20547f1cfe6db1 | 3fedde591568e35f7b80c5bf6cd6732f8eeab4f8 | refs/heads/master | 2021-03-31T02:19:15.369562 | 2018-03-13T02:45:10 | 2018-03-13T02:45:10 | 124,984,177 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,684 | py | # -*- coding: UTF-8 -*-
"""PFE Component Tests - Clients.
* TC-42892 - Clients GET:
    Verify that 20 records are displayed when the 'page-size' value is 0 for the 'page' parameter using request GET /clients.
Equivalent test CURL command:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X GET -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients?page=1;0"
Same, with test data:
curl -H "Host: <client_host>" -H "Authorization: Bearer <valid_token>"
-X GET -H "Content-Type: application/json"
"<PF_host>://<client_host>/clients?page=1;0"
"""
import pytest
from qe_common import *
logger = init_logger()
@pytest.mark.components
@pytest.allure.story('Clients')
@pytest.allure.feature('GET')
class Test_PFE_Components(object):
"""PFE Clients test cases."""
@pytest.allure.link('https://jira.qumu.com/browse/TC-42892')
@pytest.mark.Clients
@pytest.mark.GET
def test_TC_42892_GET_Clients_Invalid_Page_Size(self, context):
"""TC-42892 - Clients-GET
        Verify that 20 records are displayed when the 'page-size' value is 0 for the 'page' parameter using request GET /clients."""
# Define a test step
        with pytest.allure.step("""Verify that 20 records are displayed when the 'page-size' value is 0 for the 'page' parameter using request GET /clients."""):
# listEntities the Clients.
# The `check` call validates return code
# and some of the swagger schema.
# Most schema checks are disabled.
check(
context.cl.Clients.listEntities(
page='1;0')
)
| [
"[email protected]"
] | |
4b1816e64d86e656e29c4d7e8747cabafc9b5f74 | 4a36b5979b0753b32cff3956fd97fb8ed8b11e84 | /1.0/_downloads/a783c0b285deabf61a1ae7035b88256a/cluster_stats_evoked.py | 30ed1d1a078ff13647503661206530566df17338 | [] | permissive | mne-tools/mne-tools.github.io | 8aac7ae10bf2faeeb875b9a351a5530dc0e53154 | 495e878adc1ef3374e3db88604504d7542b01194 | refs/heads/main | 2023-09-03T07:06:00.660557 | 2023-09-03T04:10:18 | 2023-09-03T04:10:18 | 35,639,371 | 12 | 16 | BSD-3-Clause | 2023-05-05T19:04:32 | 2015-05-14T22:04:23 | HTML | UTF-8 | Python | false | false | 2,790 | py | # -*- coding: utf-8 -*-
"""
.. _ex-cluster-evoked:
=======================================================
Permutation F-test on sensor data with 1D cluster level
=======================================================
One tests if the evoked response is significantly different
between conditions. Multiple comparison problem is addressed
with cluster level permutation test.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD-3-Clause
# %%
import matplotlib.pyplot as plt
import mne
from mne import io
from mne.stats import permutation_cluster_test
from mne.datasets import sample
print(__doc__)
# %%
# Set parameters
data_path = sample.data_path()
meg_path = data_path / 'MEG' / 'sample'
raw_fname = meg_path / 'sample_audvis_filt-0-40_raw.fif'
event_fname = meg_path / 'sample_audvis_filt-0-40_raw-eve.fif'
tmin = -0.2
tmax = 0.5
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname)
events = mne.read_events(event_fname)
channel = 'MEG 1332' # include only this channel in analysis
include = [channel]
# %%
# Read epochs for the channel of interest
picks = mne.pick_types(raw.info, meg=False, eog=True, include=include,
exclude='bads')
event_id = 1
reject = dict(grad=4000e-13, eog=150e-6)
epochs1 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition1 = epochs1.get_data() # as 3D matrix
event_id = 2
epochs2 = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), reject=reject)
condition2 = epochs2.get_data() # as 3D matrix
condition1 = condition1[:, 0, :] # take only one channel to get a 2D array
condition2 = condition2[:, 0, :] # take only one channel to get a 2D array
# %%
# Compute statistic
threshold = 6.0
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_test([condition1, condition2], n_permutations=1000,
threshold=threshold, tail=1, n_jobs=1,
out_type='mask')
# %%
# Plot
times = epochs1.times
fig, (ax, ax2) = plt.subplots(2, 1, figsize=(8, 4))
ax.set_title('Channel : ' + channel)
ax.plot(times, condition1.mean(axis=0) - condition2.mean(axis=0),
label="ERF Contrast (Event 1 - Event 2)")
ax.set_ylabel("MEG (T / m)")
ax.legend()
for i_c, c in enumerate(clusters):
c = c[0]
if cluster_p_values[i_c] <= 0.05:
h = ax2.axvspan(times[c.start], times[c.stop - 1],
color='r', alpha=0.3)
else:
ax2.axvspan(times[c.start], times[c.stop - 1], color=(0.3, 0.3, 0.3),
alpha=0.3)
hf = plt.plot(times, T_obs, 'g')
ax2.legend((h, ), ('cluster p-value < 0.05', ))
ax2.set_xlabel("time (ms)")
ax2.set_ylabel("f-values")
| [
"[email protected]"
] | |
b21cc60288a12a525d33281ba13def79fd81b34a | 597c4f48332251552a602122bb3d325bc43a9d7f | /etc/chapter09_stack_old/implement/04_empty.py | aacff5442075e0c2020872af886861589bfe5559 | [] | no_license | Kyeongrok/python_algorithm | 46de1909befc7b17766a57090a7036886361fd06 | f0cdc221d7908f26572ae67b5c95b12ade007ccd | refs/heads/master | 2023-07-11T03:23:05.782478 | 2023-06-22T06:32:31 | 2023-06-22T06:32:31 | 147,303,654 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | class Stack1():
arr = []
last_index = 0
def __init__(self, size=10000):
self.arr = [None] * size
def push(self, value):
self.arr[self.last_index] = value
self.last_index += 1
def pop(self):
value = self.arr[self.last_index - 1]
self.last_index -= 1
return value
st = Stack1()
print(st.pop())
from _collections import deque
[].pop()
| [
"[email protected]"
] | |
b34a8a5649c8d6340f7eb3cfb2c1d166f74f221b | 33736b585caa659ac4a5a8a1ac52df50bdf71f1b | /py_solution/5_SMS.py | 53e953b2191981118abe61d67c0f78617db28fe7 | [] | no_license | oliverhuangchao/epic_interview | 3d649fadab0728c629bfe9d8cc14b9045a593385 | 4cfdbc0b83e13e7552633e566b3ddbb4a250a6a0 | refs/heads/master | 2021-01-10T22:29:37.663863 | 2015-07-17T18:55:42 | 2015-07-17T18:55:42 | 38,897,661 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | # SMS
# You are given a telephone keyboard
# 0-0, 1-1, 2-ABC2, 3-DEF3, 4-GHI4, 5-JKL5, 6-MNO6,7-PQRS7, 8-TUV8, 9-WXYZ9, *-space,
# #-char separater
# if you type "2", you will get 'A', that is "2"-'A', "22"-'B' ,"222"-'C', "2222"-'D'
# However, the digits can repeated many times
# "22222"-you get 'A' again . Waral
# You can use "#" to separate characters, for example
# "2#22", you get "AB" .
# However, you may also have consecutive different digits without separator:"23"-'AD'
# If you type "*", it means space.
# You a given a sequence of digits, translate it into a text message
import string
#prepare at the begining
ori = {0:'0', 1:'1', 2:'ABC2', 3:'DEF3', 4:'GHI4', 5:'JKL5', 6:'MNO6',7:'PQRS7', 8:'TUV8', 9:'WXYZ9'}
all = string.ascii_uppercase
newdict = dict()
for i in ori:
newdict[str(i)] = ori[i][-1]
for i in range(2,10):
count = 1
for j in ori[i][:-1]:
newdict[str(i)*count] = j#[str(i) for k in range(count)]
count+=1
def transform(newdict,inputstring):
words = inputstring.split("*")
res = ""
for item in words:
z = item.split("#")
for each in z:
if each in newdict:
res += newdict[each]
else:
x = each[0]
for i in range(1,len(each)):
if each[i] != x:
res += newdict[each[:i]]
res += newdict[each[i:]]
x = each[i]
res += " "
print res
inputstring = "12*322#2*33"
print inputstring
transform(newdict, inputstring)
| [
"[email protected]"
] | |
4ae97a5658a60b30643ff161b05f6a8521096ec4 | 5a4d5ee624b375ece06fda1467afe18beb69c14b | /Algorithm/SW_Expert/1-38.py | 88b04484164d99806c34411f17d319537db83606 | [] | no_license | Knightofcydonia51/TIL | cd10dab949659bc827118ee42b25d926336dce23 | 78d7e8617f4abed9932a557c12e68bd950f8230d | refs/heads/master | 2022-12-26T00:10:06.262200 | 2022-05-26T01:12:32 | 2022-05-26T01:12:32 | 195,938,010 | 0 | 0 | null | 2022-12-16T01:03:09 | 2019-07-09T05:22:49 | Python | UTF-8 | Python | false | false | 109 | py | l=[1,2,3,4,3,2,1]
def deleter(list2):
list2=list(set(list2))
return list2
print(l)
print(deleter(l)) | [
"[email protected]"
] | |
5e405561491c9f8ec9865d157896c876e026bf58 | 98c6ea9c884152e8340605a706efefbea6170be5 | /examples/data/Assignment_2/mhmmik002/question2.py | f65b3961a2a06e9df45c62d5bc009ab70df32d71 | [] | no_license | MrHamdulay/csc3-capstone | 479d659e1dcd28040e83ebd9e3374d0ccc0c6817 | 6f0fa0fa1555ceb1b0fb33f25e9694e68b6a53d2 | refs/heads/master | 2021-03-12T21:55:57.781339 | 2014-09-22T02:22:22 | 2014-09-22T02:22:22 | 22,372,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,274 | py | print("Welcome to the 30 Second Rule Expert")
print("------------------------------------")
print("Answer the following questions by selecting from among the options.")
x=input("Did anyone see you? (yes/no)\n")
if (x) == "yes":
b=input("Was it a boss/lover/parent? (yes/no)\n")
if (b) == "no":
print("Decision: Eat it.")
else:
a=input("Was it expensive? (yes/no)\n")
if (a) == "yes":
s=input("Can you cut off the part that touched the floor? (yes/no)\n")
if (s) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
f=input("Is it chocolate? (yes/no)\n")
if (f) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
z=input("Was it sticky? (yes/no)\n")
if (z) == "yes":
v=input("Is it a raw steak? (yes/no)\n")
if (v) == "yes":
m=input("Are you a puma? (yes/no)\n")
if (m) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
n=input("Did the cat lick it? (yes/no)\n")
if (n) == "yes":
g=input("Is your cat healthy? (yes/no)\n")
if (g) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
print("Decision: Eat it.")
else:
c=input("Is it an Emausaurus? (yes/no)\n")
if (c) == "yes":
d=input("Are you a Megalosaurus? (yes/no)\n")
if (d) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Don't eat it.")
else:
n=input("Did the cat lick it? (yes/no)\n")
if (n) == "yes":
g=input("Is your cat healthy? (yes/no)\n")
if (g) == "yes":
print("Decision: Eat it.")
else:
print("Decision: Your call.")
else:
print("Decision: Eat it.") | [
"[email protected]"
] | |
b5b6b93910e5075aaefa207e44ac09ac7a47bada | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_89/41.py | 4cb3ead2a2dc396c5a937e620544e8867f8ae5e0 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,082 | py | #!/usr/bin/env python
primes=[2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997]
from math import log
from sys import stdin
for case in range(1, int(stdin.readline())+1):
N=int(stdin.readline())
if N==1: n=0
else:
n=1
for p in primes:
if p<=N:
n+=int(log(N,p))-1
else: break
print "Case #%d:"%case, n
| [
"[email protected]"
] | |
0cbeaf2442721ecc71a0cd8158504cac1b4e4f47 | eade1861db1968645e0e17dfaa5250a4b8245b98 | /instacart/faron.py | bf8a14a8e3fb487d4321cee3fb9b8eb7eb4a4b08 | [] | no_license | piupiuup/competition | 5b5da56fed336e07cf99cef8f5bfe89a8f771900 | 076c30df3d2647cb3580c543e604375e84590ca7 | refs/heads/master | 2022-09-30T14:47:01.244084 | 2020-05-30T12:56:02 | 2020-05-30T12:56:02 | 268,074,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,764 | py | """
@author: Faron
"""
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import multiprocessing
from datetime import datetime
'''
Calculates (user, product) order_streak for the last n orders.
- abs(order_streak) is length of streak
- sgn(order_streak) encodes type of streak (non-ordered vs ordered)
'''
DATA_DIR = "../input/"
PRIOR_FILE = "order_products__prior"
ORDERS_FILE = "orders"
def load_input_data():
PATH = "{}{}{}".format(DATA_DIR, PRIOR_FILE, ".csv")
prior = pd.read_csv(PATH, dtype={'order_id': np.int32,
'product_id': np.uint16,
'add_to_cart_order': np.int16,
'reordered': np.int8})
PATH = "{}{}{}".format(DATA_DIR, ORDERS_FILE, ".csv")
orders = pd.read_csv(PATH, dtype={'order_id': np.int32,
'user_id': np.int64,
'order_number': np.int16,
'order_dow': np.int8,
'order_hour_of_day': np.int8,
'days_since_prior_order': np.float32})
return prior, orders
def apply_parallel(df_groups, _func):
nthreads = multiprocessing.cpu_count() >> 1
print("nthreads: {}".format(nthreads))
res = Parallel(n_jobs=nthreads)(delayed(_func)(grp.copy()) for _, grp in df_groups)
return pd.concat(res)
def add_order_streak(df):
tmp = df.copy()
tmp.user_id = 1
UP = tmp.pivot(index="product_id", columns='order_number').fillna(-1)
UP.columns = UP.columns.droplevel(0)
x = np.abs(UP.diff(axis=1).fillna(2)).values[:, ::-1]
df.set_index("product_id", inplace=True)
df['order_streak'] = np.multiply(np.argmax(x, axis=1) + 1, UP.iloc[:, -1])
df.reset_index(drop=False, inplace=True)
return df
if __name__ == '__main__':
prior, orders = load_input_data()
print("orders: {}".format(orders.shape))
print("take only recent 5 orders per user:")
orders = orders.groupby(['user_id']).tail(5 + 1)
print("orders: {}".format(orders.shape))
prior = orders.merge(prior, how='inner', on="order_id")
prior = prior[['user_id', 'product_id', 'order_number']]
print("prior: {}".format(prior.shape))
user_groups = prior.groupby('user_id')
s = datetime.now()
df = apply_parallel(user_groups, add_order_streak)
e = datetime.now()
print("time elapsed: {}".format(e - s))
df = df.drop("order_number", axis=1).drop_duplicates().reset_index(drop=True)
df = df[['user_id', 'product_id', 'order_streak']]
print(df.head(n=10))
df.to_csv("order_streaks.csv", index=False)
print("order_streaks.csv has been written") | [
"[email protected]"
] | |
92c1d2a705987af6519a8232a61560316e935a30 | 039f2c747a9524daa1e45501ada5fb19bd5dd28f | /ABC008/ABC008c.py | 87101a377b00a29da7177f1b64b86131d72617f7 | [
"Unlicense"
] | permissive | yuto-moriizumi/AtCoder | 86dbb4f98fea627c68b5391bf0cc25bcce556b88 | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | refs/heads/master | 2023-03-25T08:10:31.738457 | 2021-03-23T08:48:01 | 2021-03-23T08:48:01 | 242,283,632 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | #ABC008c
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
| [
"[email protected]"
] | |
aeb73fa853bdfc044c427f1d12e75525607b2690 | b7d766db43e1857bc1c886bbffa01817d201fb2e | /Algorithm PS/이것이 취업을 위한 코딩테스트다/Chapter 11 그리디 문제/볼링공 고르기.py | 7b772fe7bd59365f6202c61571944cf8694f0793 | [] | no_license | Jongminfire/Python | ae4010b23b60b59cddd837344784ef9da33d1b1d | 11219310cd13c18647c3220b89878c25fdc98922 | refs/heads/main | 2023-07-27T20:27:12.612883 | 2021-09-10T08:05:01 | 2021-09-10T08:05:01 | 307,398,237 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 372 | py | n,m = map(int,input().split())
ball = list(map(int,input().split()))
# use a set to get the distinct weights (m is at most 10 in this problem, so a plain list would work too)
s = set(ball)
answer = 0
# first count every possible pair of two balls, C(n, 2)
for i in range(1,n):
answer += i
# then subtract the pairs whose two balls share a weight, C(cnt, 2) per weight
for i in s:
    cnt = ball.count(i)
    answer -= cnt * (cnt - 1) // 2
print(answer) | [
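# Worked example: n=5, m=3, ball = [1, 3, 2, 3, 2]
# all pairs: C(5, 2) = 10; same-weight pairs: two 3s -> 1, two 2s -> 1
# answer = 10 - 1 - 1 = 8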
"[email protected]"
] | |
1b1ff1573ecfd049b15a8a82ced9916ca5a8548e | cd127231a354bf7a299667e65cbd83265988be7f | /COMPANIES/ness/ex.py | f54d1ef46bca7fff2e8525f57769802775ccf1a2 | [] | no_license | nagireddy96666/Interview_-python | de96c8d2dfd56343351bd0039adad561e79aac1a | 6789d200ded60575682f467c880990937e4d4f0f | refs/heads/master | 2020-03-10T01:53:17.185819 | 2018-04-11T16:22:05 | 2018-04-11T16:22:05 | 129,121,777 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,416 | py | >>> x=(1,2,(4,5,6,7,8))
>>> x[2][1:4]
(5, 6, 7)
>>> "".join([str(i) for i in x[2][1:4]])
'567'
>>> s="apple banna and apple banna "
>>> s.count('apple')
2
>>> s.count('banna')==s.count('apple')
True
>>> l=['apple',['banna','apple']]
>>> l.count('apple')
1
>>> set(l)
Traceback (most recent call last):
File "<pyshell#8>", line 1, in <module>
set(l)
TypeError: unhashable type: 'list'
>>> i='apple'
>>> list(l)
['apple', ['banna', 'apple']]
>>> for i in l:
if i=="apple:"
SyntaxError: invalid syntax
>>> for i in l:
if i=="apple":
count+=1
else:
x=i.count('apple')
count+=x
Traceback (most recent call last):
File "<pyshell#18>", line 3, in <module>
count+=1
NameError: name 'count' is not defined
>>> count=0
>>> for i in l:
if i=="apple":
count+=1
else:
x=i.count('apple')
count+=x
>>> print count
2
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
print count
SyntaxError: invalid syntax
>>>
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
print count
SyntaxError: invalid syntax
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>>
>>> count
2
>>> l=['apple','banaba',['apple','banan']]
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>> count
2
>>> l=['banaba',['apple','banan']]
>>> count=0
>>> for i in l:
if i.count('apple'):
count+=i.count('apple')
>>> count
1
>>>
| [
"[email protected]"
] | |
e80898bbcbe582829b80d0cba3f32816f4b4f2e6 | 15102eb2c657a296eb00821dc378225b79fbc17e | /Homework/venv/Lib/site-packages/pip-19.0.3-py3.7.egg/pip/_vendor/html5lib/treebuilders/__init__.py | 719f41d61f65e2d4064afc9d24e406f6c2af3e92 | [] | no_license | yuju13488/pyworkspace | 746446b3573fa6241d979b205e964e7d52af009b | 0c77836185237450ee446542e6ff3856c7cd7de1 | refs/heads/master | 2020-08-02T03:56:55.577735 | 2019-10-04T05:50:56 | 2019-10-04T05:50:56 | 211,226,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,607 | py | """A collection of modules for building different kinds of trees from HTML
documents.
To create a treebuilder for a new type of tree, you need to do
implement several things:
1. A set of classes for various types of elements: Document, Doctype, Comment,
Element. These must implement the interface of ``base.treebuilders.Node``
(although comment nodes have a different signature for their constructor,
see ``treebuilders.etree.Comment``) Textual content may also be implemented
as another node type, or not, as your tree implementation requires.
2. A treebuilder object (called ``TreeBuilder`` by convention) that inherits
from ``treebuilders.base.TreeBuilder``. This has 4 required attributes:
* ``documentClass`` - the class to use for the bottommost node of a document
* ``elementClass`` - the class to use for HTML Elements
* ``commentClass`` - the class to use for comments
* ``doctypeClass`` - the class to use for doctypes
It also has one required method:
* ``getDocument`` - Returns the root node of the complete document tree
3. If you wish to run the unit tests, you must also create a ``testSerializer``
method on your treebuilder which accepts a node and returns a string
containing Node and its children serialized according to the format used in
the unittests
"""
from __future__ import absolute_import, division, unicode_literals
from .._utils import default_etree
treeBuilderCache = {}
def getTreeBuilder(treeType, implementation=None, **kwargs):
"""Get a TreeBuilder class_hw for various types of trees with built-in support
:arg treeType: the name of the tree type required (case-insensitive). Supported
values are:
* "dom" - A generic builder for DOM implementations, defaulting to a
xml.dom.minidom based implementation.
* "etree" - A generic builder for tree implementations exposing an
ElementTree-like interface, defaulting to xml.etree.cElementTree if
available and xml.etree.ElementTree if not.
* "lxml" - A etree-based builder for lxml.etree, handling limitations
of lxml's implementation.
:arg implementation: (Currently applies to the "etree" and "dom" tree
types). A module implementing the tree type e.g. xml.etree.ElementTree
or xml.etree.cElementTree.
:arg kwargs: Any additional options to pass to the TreeBuilder when
creating it.
Example:
>>> from html5lib.treebuilders import getTreeBuilder
>>> builder = getTreeBuilder('etree')
"""
treeType = treeType.lower()
if treeType not in treeBuilderCache:
if treeType == "dom":
from . import dom
# Come up with a sane default (pref. from the stdlib)
if implementation is None:
from xml.dom import minidom
implementation = minidom
# NEVER cache here, caching is done in the dom submodule
return dom.getDomModule(implementation, **kwargs).TreeBuilder
elif treeType == "lxml":
from . import etree_lxml
treeBuilderCache[treeType] = etree_lxml.TreeBuilder
elif treeType == "etree":
from . import etree
if implementation is None:
implementation = default_etree
# NEVER cache here, caching is done in the etree submodule
return etree.getETreeModule(implementation, **kwargs).TreeBuilder
else:
raise ValueError("""Unrecognised treebuilder "%s" """ % treeType)
return treeBuilderCache.get(treeType)
| [
"[email protected]"
] | |
1405cbdf7ef8552c640e6016fb19520d9b5d29bb | f82757475ea13965581c2147ff57123b361c5d62 | /gi-stubs/repository/Clutter/ActorPrivate.py | 5442f5ceb9b035c2bbbd78a25b97746718dc1c0e | [] | no_license | ttys3/pygobject-stubs | 9b15d1b473db06f47e5ffba5ad0a31d6d1becb57 | d0e6e93399212aada4386d2ce80344eb9a31db48 | refs/heads/master | 2022-09-23T12:58:44.526554 | 2020-06-06T04:15:00 | 2020-06-06T04:15:00 | 269,693,287 | 8 | 2 | null | 2020-06-05T15:57:54 | 2020-06-05T15:57:54 | null | UTF-8 | Python | false | false | 4,378 | py | # encoding: utf-8
# module gi.repository.Clutter
# from /usr/lib64/girepository-1.0/Clutter-1.0.typelib
# by generator 1.147
"""
An object which wraps an introspection typelib.
This wrapping creates a python module like representation of the typelib
using gi repository as a foundation. Accessing attributes of the module
will dynamically pull them in and create wrappers for the members.
These members are then cached on this introspection module.
"""
# imports
import gi as __gi
import gi.overrides.GObject as __gi_overrides_GObject
import gi.repository.Atk as __gi_repository_Atk
import gi.repository.GObject as __gi_repository_GObject
import gobject as __gobject
class ActorPrivate(__gi.Struct):
# no doc
def __delattr__(self, *args, **kwargs): # real signature unknown
""" Implement delattr(self, name). """
pass
def __dir__(self, *args, **kwargs): # real signature unknown
""" Default dir() implementation. """
pass
def __eq__(self, *args, **kwargs): # real signature unknown
""" Return self==value. """
pass
def __format__(self, *args, **kwargs): # real signature unknown
""" Default object formatter. """
pass
def __getattribute__(self, *args, **kwargs): # real signature unknown
""" Return getattr(self, name). """
pass
def __ge__(self, *args, **kwargs): # real signature unknown
""" Return self>=value. """
pass
def __gt__(self, *args, **kwargs): # real signature unknown
""" Return self>value. """
pass
def __hash__(self, *args, **kwargs): # real signature unknown
""" Return hash(self). """
pass
def __init_subclass__(self, *args, **kwargs): # real signature unknown
"""
This method is called when a class is subclassed.
The default implementation does nothing. It may be
overridden to extend subclasses.
"""
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, *args, **kwargs): # real signature unknown
""" Return self<=value. """
pass
def __lt__(self, *args, **kwargs): # real signature unknown
""" Return self<value. """
pass
@staticmethod # known case of __new__
def __new__(*args, **kwargs): # real signature unknown
""" Create and return a new object. See help(type) for accurate signature. """
pass
def __ne__(self, *args, **kwargs): # real signature unknown
""" Return self!=value. """
pass
def __reduce_ex__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __reduce__(self, *args, **kwargs): # real signature unknown
""" Helper for pickle. """
pass
def __repr__(self, *args, **kwargs): # real signature unknown
""" Return repr(self). """
pass
def __setattr__(self, *args, **kwargs): # real signature unknown
""" Implement setattr(self, name, value). """
pass
def __sizeof__(self, *args, **kwargs): # real signature unknown
""" Size of object in memory, in bytes. """
pass
def __str__(self, *args, **kwargs): # real signature unknown
""" Return str(self). """
pass
def __subclasshook__(self, *args, **kwargs): # real signature unknown
"""
Abstract classes can override this to customize issubclass().
This is invoked early on by abc.ABCMeta.__subclasscheck__().
It should return True, False or NotImplemented. If it returns
NotImplemented, the normal algorithm is used. Otherwise, it
overrides the normal algorithm (and the outcome is cached).
"""
pass
def __weakref__(self, *args, **kwargs): # real signature unknown
pass
__class__ = None # (!) real value is "<class 'gi.types.StructMeta'>"
__dict__ = None # (!) real value is "mappingproxy({'__info__': StructInfo(ActorPrivate), '__module__': 'gi.repository.Clutter', '__gtype__': <GType void (4)>, '__dict__': <attribute '__dict__' of 'ActorPrivate' objects>, '__weakref__': <attribute '__weakref__' of 'ActorPrivate' objects>, '__doc__': None})"
__gtype__ = None # (!) real value is '<GType void (4)>'
__info__ = StructInfo(ActorPrivate)
| [
"[email protected]"
] | |
9b18c9b220820abf2ca3f6d3ac3ad54b51e7b61b | 8115597c29c6d38b947f7097cbe00e43d20839c4 | /src/watch_copter_vs_enemies.py | 96cd17b6be2b6432e87fd2488b31b7371376b2e0 | [] | no_license | Vottivott/evolutionary-algorithms | cd323f85fb9aa8e57377dfba9237275bf9326649 | ccd6f7670fea42fee40d2c127efe8c96bba21cb0 | refs/heads/master | 2018-10-23T23:16:28.085776 | 2017-09-28T22:04:02 | 2017-09-28T22:04:02 | 103,953,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 80 | py | from copter_simulation import watch_copter_vs_enemies
watch_copter_vs_enemies() | [
"[email protected]"
] | |
0cad81c5959f3b052190971ff25ba4c17be272b4 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /K3qMssK6mF34ctXE5_0.py | d554d4ac09f043697d131072a7af0034bd43ba29 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42 | py |
def square_patch(n):
    # build each row separately so the rows are independent lists,
    # not n references to one shared list
    return [[n] * n for _ in range(n)]
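# Example: square_patch(2) -> [[2, 2], [2, 2]]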
| [
"[email protected]"
] | |
6a8fd64fa290a4515022aa9b4be3f29099b8f7b8 | 537e2be29992f8bfd3fb2797003102f4e79f5f9f | /scripts/seq-composition | 617c244546da86e1baf3cf510e368eabb5095e37 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | etal/biofrills | a0cf45700abbda865f71d55030717dee4d769446 | 36684bb6c7632f96215e8b2b4ebc86640f331bcd | refs/heads/master | 2020-06-01T16:29:41.540511 | 2013-10-21T23:01:19 | 2013-10-21T23:01:19 | 5,113,363 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 642 | #!/usr/bin/env python
"""Print the frequencies of each letter in a sequence set."""
# TODO - move the calculation to module (take input stream, return freqs)
import fileinput
from collections import Counter
# Count character types
counts = Counter()
for line in fileinput.input():
if line.startswith('>') or not line.strip():
continue
counts.update(Counter(line.strip()))
# Convert counts to frequencies
scale = 1.0 / sum(counts.values())
freqs = {char: cnt * scale
         for char, cnt in counts.items()}
# Print a nice table
for char, frq in sorted(freqs.items()):
    print('%s: %f' % (char, frq))
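# Usage sketch (hypothetical file name): ./seq-composition reads.fasta
# or pipe sequences on stdin; '>' header lines are skipped automatically.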
| [
"[email protected]"
] | ||
7362e34f612448e62b39d7ee13d6f41730354825 | ba27372850fd287f4e268f486103afb797c7f4f4 | /setup.py | 286ff3234f0365f85c60fc77027909c3e6576437 | [
"BSD-3-Clause"
] | permissive | django-blog-zinnia/feed2zinnia | 9702a3b177f16009ac49907b2298f98243fab374 | ec1a5e44f6175dab248e2f4f9ba3f9ecb2800e6b | HEAD | 2016-09-16T05:26:56.676013 | 2015-01-15T11:19:01 | 2015-01-15T11:19:01 | 29,293,499 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,092 | py | """Setup script of feed2zinnia"""
from setuptools import setup
from setuptools import find_packages
import zinnia_feed
setup(
name='feed2zinnia',
version=zinnia_feed.__version__,
description='Import your RSS or Atom feed into Zinnia',
long_description=open('README.rst').read(),
keywords='django, zinnia, feed, rss, atom',
author=zinnia_feed.__author__,
author_email=zinnia_feed.__email__,
url=zinnia_feed.__url__,
packages=find_packages(exclude=['demo_zinnia_feed']),
classifiers=[
'Framework :: Django',
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: BSD License',
'Topic :: Software Development :: Libraries :: Python Modules'],
license=zinnia_feed.__license__,
include_package_data=True,
zip_safe=False,
install_requires=['feedparser>=5.1.3']
)
| [
"[email protected]"
] | |
177e721a596ee080d3343228f66a65ecd4fa0724 | dc965a62709bbb2c6c9ad01859a83507d7457941 | /Assignments/Class Assignments/AutoGenerateClass.py | ee53ba982de479f56ddc5aeb651099442d698a61 | [] | no_license | JyotiSathe/Python | ead31a84cde86d734acdf0ad83c27c6bb1c1a331 | 846371d678ba225c210493605233b262a51bd950 | refs/heads/master | 2021-05-11T22:38:30.299035 | 2018-06-24T14:08:37 | 2018-06-24T14:08:37 | 117,364,196 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 692 | py | class AutoGenerate:
def __init__(self,start,stop,step=1):
self.start=start
self.stop=stop
self.step=step
    def Next(self):
        # advance one step and return the value; returning directly (instead of
        # yielding) avoids PEP 479, where StopIteration raised inside a
        # generator is turned into RuntimeError on modern Python
        self.start+=self.step
        if self.start>=self.stop:
            raise StopIteration
        return self.start
    def next(self):            # Python 2 iterator protocol
        return self.Next()
    def __next__(self):        # Python 3 iterator protocol
        return self.Next()
#def __iter__(self):
# return self
def main():
x=AutoGenerate(0,100,5)
    # a for-loop needs an iterator, so __iter__ above must be uncommented first:
    #for y in x:
    #    print(y)
y=x.next()
print (y)
if __name__=='__main__':
main()
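# Illustrative note: with __iter__ restored, `for y in AutoGenerate(0, 100, 5)`
# would print 5, 10, ..., 95 before StopIteration ends the loop.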
| [
"[email protected]"
] | |
a9dfdb93f377c71c1655c5383fe4d557af7f730b | 6758974fd7046a3947f1387a788cfebe7ac85b22 | /BilibiliUpVideosDownloader/BilibiliUpVideosDownload.py | d5eba781595ae6be622bd83dbf99b1ad88cb45dd | [] | no_license | HkerVit/facebook-api-video-upload-py | d85ec55c3e7adacaf094b8440111ccdb8d065a6f | 6bcf0f4c08512c5b3896c6f61a80de66c0a59744 | refs/heads/main | 2023-03-16T22:42:48.137863 | 2021-03-11T03:49:23 | 2021-03-11T03:49:23 | 346,636,830 | 1 | 0 | null | 2021-03-11T08:51:47 | 2021-03-11T08:51:46 | null | UTF-8 | Python | false | false | 2,444 | py | import sys, getopt
import requests
import json
import os
import pymysql
def get_history(bvid):
find_sql = "SELECT * FROM download_history_bilibili WHERE bvid='{}'".format(bvid)
findres = cursor.execute(find_sql)
if findres == 0:
res = False
else:
res = True
return res
def get_url_list(uid):
url = f"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30&tid=0&pn=1&keyword=&order=pubdate&jsonp=jsonp"
data = json.loads(requests.get(url).text)
if data["code"] == 0:
count = data["data"]["page"]["count"]
page_count = int(count/30) + 1
for page in range(page_count):
pn = page + 1
url = f"https://api.bilibili.com/x/space/arc/search?mid={uid}&ps=30&tid=0&pn={pn}&keyword=&order=pubdate&jsonp=jsonp"
page_vdict = json.loads(requests.get(url).text)["data"]["list"]["vlist"]
for vdict in page_vdict:
bvid="https://www.bilibili.com/video/"+vdict["bvid"]
vdict['bvid']=bvid
vdict['pic']=vdict['pic'].replace("//",'')
                bvid_exists = get_history(bvid)
                if not bvid_exists:
                    values_list = list(vdict.values())
                    values_list = ["0"] + values_list
                    values = tuple(values_list)
                    add_sql = "INSERT INTO download_history_bilibili VALUES {}".format(values)
                    cursor.execute(add_sql)
                    db.commit()
                    print("Insert: ", bvid)
                else:
                    print("Exist: ", bvid)
def downloadVideo(uid):
find_sql = "SELECT * FROM download_history_bilibili WHERE mid='{}'".format(uid)
cursor.execute(find_sql)
res=cursor.fetchall()
for r in res:
bvid = r[16]
author=r[10]
path = "./download/{}/".format(author)
pathExist=os.path.exists(path)
if not pathExist:
os.makedirs(path)
cmd = "annie -o {} {}".format(path,bvid)
os.system(cmd)
if __name__ == "__main__":
db_host = "45.76.170.159"
db_user = "db_poster"
db_name = "db_poster"
db_pass = "ysq1159889481"
db = pymysql.connect(host=db_host, user=db_user, password=db_pass, database=db_name)
cursor = db.cursor()
# get_url_list(15183062)
downloadVideo(15183062)
db.close()
| [
"[email protected]"
] | |
256bc94180a64e4adbbbbc23b29e319b6f40ded7 | 751b094918ae9200afe7824d58804549082caa95 | /src/python/WMComponent/DBS3Buffer/Oracle/CreateBlocks.py | cad2bf954ebb5bcb10c781f908e8dbdf8a3500e2 | [] | no_license | cinquo/WMCore | 7ebd13269f42eb97f416f8f2bdaca05fa93c6afc | 122f9332f2e944154dd0df68b6b3f2875427b032 | refs/heads/master | 2021-01-09T06:28:58.947626 | 2013-06-05T08:31:53 | 2013-06-05T08:31:53 | 2,965,330 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | #!/usr/bin/env python
"""
_DBSBuffer.SetBlockStatus_
Create new block in dbsbuffer_block
Update file to reflect block information
"""
import threading
import exceptions
from WMComponent.DBS3Buffer.MySQL.CreateBlocks import CreateBlocks as MySQLCreateBlocks
class CreateBlocks(MySQLCreateBlocks):
"""
Oracle implementation
"""
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
a36698307a7e3d636b68d44b8f6c8edb79ccff13 | 97da505ec0524d7b214764d198ed9b82e79300ed | /pyiem/util.py | 214d7992d40ef89bc531b372e3a548bf3c071eb3 | [
"MIT"
] | permissive | morganetanu/pyIEM | c035a1706cccff0afed209f14760f2668259667f | 2a38d1de77d056161408e804b5c246b7e6b38056 | refs/heads/master | 2021-08-20T07:10:40.071377 | 2017-11-28T13:41:29 | 2017-11-28T13:41:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,813 | py | # -*- coding: utf-8 -*-
"""Utility functions for pyIEM package
This module contains utility functions used by various parts of the codebase.
"""
import netrc
import time
import random
import logging
import datetime
import re
import warnings
import getpass
from socket import error as socket_error
import psycopg2
from pyiem.ftpsession import FTPSession
import numpy as np
SEQNUM = re.compile(r"\001?[0-9]{3}\s?")
def get_dbconn(dbname, user=None):
"""Helper function with business logic to get a database connection
Note that this helper could return a read-only database connection if the
connection to the primary server fails.
Args:
dbname (str): the database name to connect to
user (str,optional): hard coded user to connect as, default: current user
Returns:
psycopg2 database connection
"""
if user is None:
user = getpass.getuser()
# We hard code the apache user back to nobody
if user == 'apache':
user = 'nobody'
host = "iemdb"
if dbname == 'hads':
host = "iemdb-hads"
try:
pgconn = psycopg2.connect(database=dbname, host=host, user=user,
connect_timeout=15)
except psycopg2.OperationalError as exp:
warnings.warn("database connection failure: %s" % (exp, ))
# as a stop-gap, lets try connecting to iemdb2
pgconn = psycopg2.connect(database=dbname, host='iemdb2', user=user,
connect_timeout=15)
return pgconn
def noaaport_text(text):
"""Make whatever text look like it is NOAAPort Pristine
Args:
text (string): the inbound text
Returns:
text that looks noaaportish
"""
# Convert to LFLFCR
text = text.replace("\n", "\r\r\n").replace("\r\r\r\r", "\r\r")
lines = text.split("\r\r\n")
# remove any beginning empty lines
while lines and lines[0] == '':
lines.pop(0)
    # line 0 should be the start-of-product sequence
if lines[0] != "\001":
lines.insert(0, "\001")
# line 1 should be the LDM sequence number 4 chars
if not SEQNUM.match(lines[1]):
if len(lines[1]) > 5:
lines.insert(1, "000 ")
# last line should be the control-c, by itself
if lines[-1] != "\003":
lines.append("\003")
# Second line should not be blank
    if lines[1].strip() == '':
lines = [lines[0], ] + lines[2:]
return "\r\r\n".join(lines)
def get_autoplot_context(fdict, cfg):
"""Get the variables out of a dict of strings
This helper for IEM autoplot gets values out of a dictionary of strings,
as provided by CGI. It does some magic to get types right, defaults right
and so on. The typical way this is called
ctx = iemutils.get_context(fdict, get_description())
Args:
fdict (dictionary): what was likely provided by `cgi.FieldStorage()`
cfg (dictionary): autoplot value of get_description
Returns:
dictionary of variable names and values, with proper types!
"""
ctx = {}
for opt in cfg.get('arguments', []):
name = opt.get('name')
default = opt.get('default')
typ = opt.get('type')
minval = opt.get('min')
maxval = opt.get('max')
optional = opt.get('optional', False)
value = fdict.get(name)
if optional and value is None and typ not in ['vtec_ps']:
continue
if typ in ['station', 'zstation', 'sid', 'networkselect']:
# A bit of hackery here if we have a name ending in a number
netname = "network%s" % (name[-1] if name[-1] != 'n' else '',)
ctx[netname] = fdict.get(netname)
# The network variable tags along and within a non-PHP context,
# this variable is unset, so we do some more hackery here
if ctx[netname] is None:
ctx[netname] = opt.get('network')
elif typ in ['int', 'month', 'zhour', 'hour', 'day', 'year']:
if value is not None:
value = int(value)
if default is not None:
default = int(default)
elif typ == 'float':
if value is not None:
value = float(value)
if default is not None:
default = float(default)
elif typ == 'select':
options = opt.get('options', dict())
if value not in options:
value = default
elif typ == 'datetime':
# tricky here, php has YYYY/mm/dd and CGI has YYYY-mm-dd
if default is not None:
default = datetime.datetime.strptime(default,
'%Y/%m/%d %H%M')
if minval is not None:
minval = datetime.datetime.strptime(minval,
'%Y/%m/%d %H%M')
if maxval is not None:
maxval = datetime.datetime.strptime(maxval,
'%Y/%m/%d %H%M')
if value is not None:
if value.find(" ") == -1:
value += " 0000"
value = datetime.datetime.strptime(value, '%Y-%m-%d %H%M')
elif typ == 'date':
# tricky here, php has YYYY/mm/dd and CGI has YYYY-mm-dd
if default is not None:
default = datetime.datetime.strptime(default,
'%Y/%m/%d').date()
if minval is not None:
minval = datetime.datetime.strptime(minval,
'%Y/%m/%d').date()
if maxval is not None:
maxval = datetime.datetime.strptime(maxval,
'%Y/%m/%d').date()
if value is not None:
value = datetime.datetime.strptime(value, '%Y-%m-%d').date()
elif typ == 'vtec_ps':
# VTEC phenomena and significance
for label in ['phenomena', 'significance']:
label = label + name
ctx[label] = fdict.get(label)
continue
# validation
if minval is not None and value is not None and value < minval:
value = default
if maxval is not None and value is not None and value > maxval:
value = default
ctx[name] = value if value is not None else default
return ctx
def exponential_backoff(func, *args, **kwargs):
""" Exponentially backoff some function until it stops erroring"""
msgs = []
for i in range(5):
try:
return func(*args, **kwargs)
except socket_error as serr:
msgs.append("%s/5 %s traceback: %s" % (i+1, func.__name__, serr))
time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))
except Exception as exp:
msgs.append("%s/5 %s traceback: %s" % (i+1, func.__name__, exp))
time.sleep((2 ** i) + (random.randint(0, 1000) / 1000))
logging.error("%s failure" % (func.__name__,))
logging.error("\n".join(msgs))
return None
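# Illustrative use (hypothetical callable): exponential_backoff(requests.get, url)
# retries up to five times, sleeping roughly 1, 2, 4, 8 then 16 jittered seconds,
# and returns None if every attempt raised.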
def send2box(filenames, remote_path, remotenames=None,
ftpserver='ftp.box.com', tmpdir='/tmp', fs=None):
"""Send one or more files to CyBox
Box has a filesize limit of 15 GB, so if we find any files larger than
that, we shall split them into chunks prior to uploading.
Args:
filenames (str or list): filenames to upload
remote_path (str): location to place the filenames
remotenames (str or list): filenames to use on the remote FTP server
should match size and type of filenames
ftpserver (str): FTP server to connect to...
        tmpdir (str, optional): Temporary folder to use if an individual file is over
15 GB in size
Returns:
FTPSession
list of success `True` or failures `False` matching filenames
"""
credentials = netrc.netrc().hosts[ftpserver]
if fs is None:
fs = FTPSession(ftpserver, credentials[0], credentials[2],
tmpdir=tmpdir)
if isinstance(filenames, str):
filenames = [filenames, ]
if remotenames is None:
remotenames = filenames
if isinstance(remotenames, str):
remotenames = [remotenames, ]
res = fs.put_files(remote_path, filenames, remotenames)
return fs, res
def get_properties():
"""Fetch the properties set
Returns:
dict: a dictionary of property names and values (both str)
"""
pgconn = psycopg2.connect(database='mesosite', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""SELECT propname, propvalue from properties""")
res = {}
for row in cursor:
res[row[0]] = row[1]
return res
def drct2text(drct):
"""Convert an degree value to text representation of direction.
Args:
drct (int or float): Value in degrees to convert to text
Returns:
str: String representation of the direction, could be `None`
"""
if drct is None:
return None
# Convert the value into a float
drct = float(drct)
if drct > 360:
return None
text = None
if drct >= 350 or drct < 13:
text = "N"
elif drct >= 13 and drct < 35:
text = "NNE"
elif drct >= 35 and drct < 57:
text = "NE"
elif drct >= 57 and drct < 80:
text = "ENE"
elif drct >= 80 and drct < 102:
text = "E"
elif drct >= 102 and drct < 127:
text = "ESE"
elif drct >= 127 and drct < 143:
text = "SE"
elif drct >= 143 and drct < 166:
text = "SSE"
elif drct >= 166 and drct < 190:
text = "S"
elif drct >= 190 and drct < 215:
text = "SSW"
elif drct >= 215 and drct < 237:
text = "SW"
elif drct >= 237 and drct < 260:
text = "WSW"
elif drct >= 260 and drct < 281:
text = "W"
elif drct >= 281 and drct < 304:
text = "WNW"
elif drct >= 304 and drct < 324:
text = "NW"
elif drct >= 324 and drct < 350:
text = "NNW"
return text
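# Examples: drct2text(360) -> "N", drct2text(92.5) -> "E", drct2text(400) -> None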
def grid_bounds(lons, lats, bounds):
"""Figure out indices that we can truncate big grid
Args:
lons (np.array): grid lons
lats (np.array): grid lats
bounds (list): [x0, y0, x1, y1]
Returns:
[x0, y0, x1, y1]
"""
x0 = 0
x1 = -1
y0 = 0
y1 = -1
if len(lons.shape) == 1:
# Do 1-d work
(x0, x1) = np.digitize([bounds[0], bounds[2]], lons)
(y0, y1) = np.digitize([bounds[1], bounds[3]], lats)
szx = len(lons)
szy = len(lats)
else:
# Do 2-d work
diff = ((lons - bounds[0])**2 + (lats - bounds[1])**2)**0.5
(lly, llx) = np.unravel_index(np.argmin(diff), lons.shape)
diff = ((lons - bounds[2])**2 + (lats - bounds[3])**2)**0.5
(ury, urx) = np.unravel_index(np.argmin(diff), lons.shape)
diff = ((lons - bounds[0])**2 + (lats - bounds[3])**2)**0.5
(uly, ulx) = np.unravel_index(np.argmin(diff), lons.shape)
diff = ((lons - bounds[2])**2 + (lats - bounds[1])**2)**0.5
(lry, lrx) = np.unravel_index(np.argmin(diff), lons.shape)
x0 = min([llx, ulx])
x1 = max([lrx, urx])
y0 = min([lry, lly])
y1 = max([uly, ury])
(szy, szx) = lons.shape
return [int(i) for i in [max([0, x0]), max([0, y0]), min([szx, x1]),
min([szy, y1])]]
if __name__ == '__main__':
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
send2box(['util.py', 'plot.py'], '/bah1/bah2/', remotenames=['util2.py',
'plot.py'])
# mirror2box("/tmp/mytest", "mytest")
| [
"[email protected]"
] | |
3b126b869bfccc6a9d0b195367775643248e1374 | 1caf4418f3549567637f5e9893a445f52a38c6a0 | /CmsAdmin/user_content/api/resources/account_verify_api.py | 69ed7dcf8e0a5f7723260d2e105ebf033f022654 | [] | no_license | Final-Game/social_network_backend | c601563e08c0fd7de72a614944f354ef8d2d31d8 | 8111787d1d20eb87733ae360d8baa745a65e2743 | refs/heads/master | 2023-03-04T21:12:43.147084 | 2021-02-23T03:45:22 | 2021-02-23T03:45:22 | 290,542,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,577 | py | from user_content.api.resources.filters.account_verify_filter import AccountVerifyFilter
from user_content.api.serializers.resources.account_verify_serializer import (
AccountVerifySerializer,
)
from rest_framework.filters import OrderingFilter, SearchFilter
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework.viewsets import ModelViewSet
from user_content.models import AccountVerify
from core.api.rest_frameworks import StandardResultsSetPagination
from core.api.rest_frameworks.order_filter import CustomOrderingFilter
class AccountVerifyApi(ModelViewSet):
queryset = AccountVerify.objects.all()
serializer_class = AccountVerifySerializer
filter_class = AccountVerifyFilter
pagination_class = StandardResultsSetPagination
filter_backends = [
SearchFilter,
OrderingFilter,
CustomOrderingFilter,
DjangoFilterBackend,
]
search_fields = ["account__profile__first_name", "account__profile__last_name"]
ordering_fields = ["created_at", "updated_at"]
def list(self, request, *args, **kwargs):
return super().list(request, *args, **kwargs)
def create(self, request, *args, **kwargs):
return super().create(request, *args, **kwargs)
def update(self, request, *args, **kwargs):
return super().update(request, *args, **kwargs)
def destroy(self, request, *args, **kwargs):
return super().destroy(request, *args, **kwargs)
def retrieve(self, request, *args, **kwargs):
return super().retrieve(request, *args, **kwargs)
| [
"[email protected]"
] | |
a7a6bbcef0abbc635c7fab15e94f6e05e49edb93 | 8a5ccfbd09fdc3eb42e8240c0b7ceaf981f27814 | /astropy_stark/astropy_stark/myresample.py | 3955dbeb0583809c82943f8a3f5932d6b34aed52 | [] | no_license | hlabathems/pycecream | 97edfd388e32ab12b22765debab31ee8c4929ab4 | cd52937c3ff053dede0b02803933ba58789d5ff3 | refs/heads/master | 2020-06-09T22:46:14.114693 | 2019-06-19T17:42:24 | 2019-06-19T17:42:24 | 193,521,752 | 0 | 1 | null | 2019-06-24T14:30:05 | 2019-06-24T14:30:04 | null | UTF-8 | Python | false | false | 6,010 | py | #### code to randomly resample a set of input light curves
#10/9/2017 sampcode 3 update includes sampmin the minum spacing between data points
#sampcode 4, dtave indicates the minimum space between data points, data points will be selected (with no interpolation) from the parent sample, skipping points until the minimum spacing dtave is achieved
##avesamp is the average length of time between the random samples
#set dir = '' and fname=[''] to have this function work on datin[nt,3] and output array rather than save to file
#new 10/9/2017 added option dtsdin need mean and standard deviation of spacing between points e.g setting
#dtsdin very small will give very regularly spaced points.
#if negative then the absolute value is the fraction relative to the mean e.g -0.2 will set the
#standard deviation as a fifth of the mean spacing between points
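# Illustrative usage (the file name and cadence below are hypothetical):
#   dat = np.loadtxt('lightcurve.dat')               # columns: time, flux, error
#   out = myresample('', [''], dtave=1.0, datin=dat)
#   # out is an (N, 3) array resampled at ~1-day mean cadence; nothing is
#   # written to disk because dir='' and fname=[''].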
import numpy as np
import os
def myresample(dir,fname,dtave,dtsdin = -0.2, sampmin=0.8,sampcode=3,datin=[]):
if (dtsdin < 0):
dtsd = np.abs(dtsdin)*dtave
else:
dtsd = dtsdin
dirpy=os.getcwd()
    #dir = sys.argv[1]  # the directory storing the light curves, e.g. '../fort/fortcode/fakelc/kep_18mar'
    #fname = sys.argv[2]  # a list of the files, e.g. ['file1.dat','file2.dat',...]
    #dtave = sys.argv[3]  # e.g. 0.5 resamples with a mean half-day cadence
#sampmin = 0.8
#sampcode = 2
#!!### user arguments above. Don't change sampcode or sampmin unless you know what they do (I don't and I wrote the code).###
if (dir != ''):
os.chdir(dir)
Nfile=len(fname)
for ifile in range(Nfile):
if (fname[ifile] == ''):
dat = datin
else:
dat=np.loadtxt(fname[ifile])
t=dat[:,0]
x=dat[:,1]
sig=dat[:,2]
Ndat=t.shape[0]
dt = (t[-1] - t[0])/(Ndat-1)
    # below are the sampling strategies, selected by sampcode; codes 3 and 4
    # consider the approximate spacing between points when making the idxsamp
    # selection
if sampcode == 1:
            # sample a random fraction (between sampmin and 1) of the points
            frac = (1. - sampmin)*np.random.random_sample(1)[0] + sampmin
            nidx = int(frac*Ndat)
            idxsamp = np.random.randint(low=0, high=Ndat, size=nidx)
datsamp=np.zeros((nidx,3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
elif sampcode == 2:
idxcount=0
tthen=t[0]
idxsamp=[]
xn = []
sign = []
tn = []
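            # interpolate the light curve onto jittered times spaced roughly
            # dtave apart (Gaussian jitter with sd of about 2*dt)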
while (idxcount < Ndat) & (tthen < t[-1]):
a = np.random.randn(1)*dt*2
tnow = tthen + dtave + a
tn.append(tnow)
xn.append(np.interp([tnow],t,x)[0])
sign.append(np.interp([tnow],t,sig)[0])
#idxsamp.append(np.abs(t-tnow).argmin()) ## index of closest time to tnow
tthen=tnow
idxcount=idxcount+1
#idxsamp=np.array(idxsamp)
tn = np.array(tn)
xn = np.array(xn)
sign = np.array(sign)
nn = xn.shape[0]
datsamp=np.zeros((nn,3))
datsamp[:,0]=tn[:,0]
datsamp[:,1]=xn[:,0]
datsamp[:,2]=sign[:,0]
elif sampcode == 3:
#print 'ararar'
idxcount=0
tthen=t[0]
idxsamp=[]
tlast = t[-1]
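            # random-walk forward in steps of mean dtave (sd dtsd); keep the
            # nearest parent sample only when the latest step exceeds sampmin
            # (or when the proposed time lands near the end of the curve)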
while (idxcount < Ndat-1) & (tthen < tlast - 4*sampmin):
#a = np.random.randn(1)*dtave
a = np.random.normal(dtave,dtsd,1)[0]
tnow = tthen + np.abs(a)
idxtemp = np.abs(t-tnow).argmin()
#print tnow,tthen,'before mrs'
#print tnow, tthen, idxcount, sampmin
if ((idxtemp not in idxsamp) and ((tnow - tthen > sampmin) or (tnow > tlast - sampmin))):
                    idxsamp.append(idxtemp)  # index of the time closest to tnow
                    idxcount = idxcount + 1
tthen=tnow
#print idxcount, Ndat, tthen, t[-1],'mrs'
idxsamp=np.array(idxsamp)
datsamp=np.zeros((idxsamp.shape[0],3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
ttemp_space = datsamp[1:,0]-datsamp[:-1,0]
#print('min,max,and ave spacing between elements',ttemp_space.min(), ttemp_space.max(), np.mean(ttemp_space))
elif sampcode == 4:
idxcount=0
tthen=t[0]
idxsamp=[]
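            # march forward in steps of dtave, taking the first parent sample
            # after each proposed time (no interpolation)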
while (idxcount < Ndat) & (tthen < t[-1]):
tnow = tthen + dtave
                # index of the first parent sample strictly later than tnow
                idxtemp = int(np.searchsorted(t, tnow, side='right'))
                if idxtemp >= t.shape[0]:
                    break
if (idxtemp not in idxsamp):
                    idxsamp.append(idxtemp)  # first point at least dtave after tthen
                    idxcount = idxcount + 1
tthen=t[idxtemp]
#print idxsamp
idxsamp=np.array(idxsamp)
datsamp=np.zeros((idxsamp.shape[0],3))
datsamp[:,0]=t[idxsamp]
datsamp[:,1]=x[idxsamp]
datsamp[:,2]=sig[idxsamp]
ttemp_space = datsamp[1:,0]-datsamp[:-1,0]
ns = len(idxsamp)
#for i in range(ns):
# print i,datsamp[i,:]
#print('min,max,and ave spacing between elements',ttemp_space.min(), ttemp_space.max(), np.mean(ttemp_space))
#print('locations...',ttemp_space.argmin(),ttemp_space.argmax())
        if fname[ifile] != '':
            np.savetxt('resamp_' + fname[ifile], datsamp)
    os.chdir(dirpy)  # change back to the original working directory
    return datsamp
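

# Minimal self-contained demo (illustrative only: the synthetic light curve
# below is made up, and dtave is chosen larger than the default sampmin so
# that the default sampcode=3 accepts points):
if __name__ == '__main__':
    tdemo = np.arange(0., 100., 0.1)                    # 0.1-day parent cadence
    xdemo = np.sin(2.*np.pi*tdemo/20.)                  # fake periodic signal
    sigdemo = 0.01*np.ones_like(tdemo)                  # constant error bars
    demo = np.column_stack((tdemo, xdemo, sigdemo))
    out = myresample('', [''], dtave=1.0, datin=demo)   # ~1-day mean cadence
    print('resampled shape:', out.shape)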
| [
"[email protected]"
] |