blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
616
| content_id
stringlengths 40
40
| detected_licenses
sequencelengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 777
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 149
values | src_encoding
stringclasses 26
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 3
10.2M
| extension
stringclasses 188
values | content
stringlengths 3
10.2M
| authors
sequencelengths 1
1
| author_id
stringlengths 1
132
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c258b35a94733feceb797ff62582f7f2124933d7 | 632d7759536ed0726499c2d52c8eb13b5ab213ab | /Data/Packages/pygments/all/pygments/lexers/scripting.py | bffd8c0036ee4125ba80cc336285a932ba6d69e4 | [] | no_license | Void2403/sublime_text_3_costomize | e660ad803eb12b20e9fa7f8eb7c6aad0f2b4d9bc | c19977e498bd948fd6d8f55bd48c8d82cbc317c3 | refs/heads/master | 2023-08-31T21:32:32.791574 | 2019-05-31T11:46:19 | 2019-05-31T11:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 56,509 | py | # -*- coding: utf-8 -*-
"""
pygments.lexers.scripting
~~~~~~~~~~~~~~~~~~~~~~~~~
Lexer for scripting and embedded languages.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, bygroups, default, combined, \
words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation, Error, Whitespace
from pygments.util import get_bool_opt, get_list_opt, iteritems
__all__ = ['LuaLexer', 'MoonScriptLexer', 'ChaiscriptLexer', 'LSLLexer',
'AppleScriptLexer', 'RexxLexer', 'MOOCodeLexer', 'HybrisLexer']
class LuaLexer(RegexLexer):
    """
    For `Lua <http://www.lua.org>`_ source code.

    Additional options accepted:

    `func_name_highlighting`
        If given and ``True``, highlight builtin function names
        (default: ``True``).
    `disabled_modules`
        If given, must be a list of module names whose function names
        should not be highlighted. By default all modules are highlighted.

        To get a list of allowed modules have a look into the
        `_lua_builtins` module:

        .. sourcecode:: pycon

            >>> from pygments.lexers._lua_builtins import MODULES
            >>> MODULES.keys()
            ['string', 'coroutine', 'modules', 'io', 'basic', ...]
    """

    name = 'Lua'
    aliases = ['lua']
    filenames = ['*.lua', '*.wlua']
    mimetypes = ['text/x-lua', 'application/x-lua']

    # NOTE: patterns containing backslash escapes are written as raw strings
    # throughout; the original used plain literals with \w, \. and \( in a
    # few places, which are invalid string escapes and raise
    # DeprecationWarning/SyntaxWarning on modern Python.  The resulting
    # regexes are unchanged.
    tokens = {
        'root': [
            # lua allows a file to start with a shebang
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            # long comments: --[[ ... ]] with optional = padding
            (r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
            (r'--.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'(?i)0x[0-9a-f]*', Number.Hex),
            (r'\d+', Number.Integer),
            (r'\n', Text),
            (r'[^\S\n]', Text),
            # multiline strings
            (r'(?s)\[(=*)\[.*?\]\1\]', String),
            (r'(==|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#])', Operator),
            (r'[\[\]{}().,:;]', Punctuation),
            (r'(and|or|not)\b', Operator.Word),
            (r'(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
             r'while)\b', Keyword),
            (r'(local)\b', Keyword.Declaration),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(function)\b', Keyword, 'funcname'),
            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],
        'funcname': [
            (r'\s+', Text),
            # optional "Class.method" qualification before the function name
            (r'(?:([A-Za-z_]\w*)(\.))?([A-Za-z_]\w*)',
             bygroups(Name.Class, Punctuation, Name.Function), '#pop'),
            # inline (anonymous) function: "function(...)"
            (r'\(', Punctuation, '#pop'),
        ],
        # if I understand correctly, every character is valid in a lua string,
        # so this state is only for later corrections
        'string': [
            (r'.', String)
        ],
        'stringescape': [
            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
        ],
        'sqs': [
            ("'", String, '#pop'),
            include('string')
        ],
        'dqs': [
            ('"', String, '#pop'),
            include('string')
        ]
    }

    def __init__(self, **options):
        """Collect the builtin function names to highlight.

        Honors the ``func_name_highlighting`` and ``disabled_modules``
        options documented on the class.
        """
        self.func_name_highlighting = get_bool_opt(
            options, 'func_name_highlighting', True)
        self.disabled_modules = get_list_opt(options, 'disabled_modules', [])

        self._functions = set()
        if self.func_name_highlighting:
            from pygments.lexers._lua_builtins import MODULES
            for mod, func in iteritems(MODULES):
                if mod not in self.disabled_modules:
                    self._functions.update(func)
        RegexLexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """Post-process Name tokens.

        Known builtin functions are re-tagged as Name.Builtin, and dotted
        names are split into name / dot / name tokens.
        """
        for index, token, value in \
                RegexLexer.get_tokens_unprocessed(self, text):
            if token is Name:
                if value in self._functions:
                    yield index, Name.Builtin, value
                    continue
                elif '.' in value:
                    # the base-state regex matches at most one dot, so this
                    # two-way split is safe
                    a, b = value.split('.')
                    yield index, Name, a
                    yield index + len(a), Punctuation, u'.'
                    yield index + len(a) + 1, Name, b
                    continue
            yield index, token, value
class MoonScriptLexer(LuaLexer):
    """
    For `MoonScript <http://moonscript.org>`_ source code.

    Inherits the builtin-function highlighting machinery from LuaLexer
    but defines its own token table.

    .. versionadded:: 1.5
    """
    name = "MoonScript"
    aliases = ["moon", "moonscript"]
    filenames = ["*.moon"]
    mimetypes = ['text/x-moonscript', 'application/x-moonscript']
    tokens = {
        'root': [
            # a file may start with a shebang
            (r'#!(.*?)$', Comment.Preproc),
            default('base'),
        ],
        'base': [
            ('--.*$', Comment.Single),
            (r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
            (r'(?i)\d+e[+-]?\d+', Number.Float),
            (r'(?i)0x[0-9a-f]*', Number.Hex),
            (r'\d+', Number.Integer),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # Lua-style long strings: [[ ... ]] with optional = padding
            (r'(?s)\[(=*)\[.*?\]\1\]', String),
            # function arrows (->, =>)
            (r'(->|=>)', Name.Function),
            # :name style symbols
            (r':[a-zA-Z_]\w*', Name.Variable),
            (r'(==|!=|~=|<=|>=|\.\.\.|\.\.|[=+\-*/%^<>#!.\\:])', Operator),
            (r'[;,]', Punctuation),
            (r'[\[\]{}()]', Keyword.Type),
            # "name:" table keys
            (r'[a-zA-Z_]\w*:', Name.Variable),
            (words((
                'class', 'extends', 'if', 'then', 'super', 'do', 'with',
                'import', 'export', 'while', 'elseif', 'return', 'for', 'in',
                'from', 'when', 'using', 'else', 'and', 'or', 'not', 'switch',
                'break'), suffix=r'\b'),
             Keyword),
            (r'(true|false|nil)\b', Keyword.Constant),
            (r'(and|or|not)\b', Operator.Word),
            (r'(self)\b', Name.Builtin.Pseudo),
            # @field / @@classfield access
            (r'@@?([a-zA-Z_]\w*)?', Name.Variable.Class),
            (r'[A-Z]\w*', Name.Class),  # proper name
            (r'[A-Za-z_]\w*(\.[A-Za-z_]\w*)?', Name),
            ("'", String.Single, combined('stringescape', 'sqs')),
            ('"', String.Double, combined('stringescape', 'dqs'))
        ],
        'stringescape': [
            (r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
        ],
        'sqs': [
            ("'", String.Single, '#pop'),
            (".", String)
        ],
        'dqs': [
            ('"', String.Double, '#pop'),
            (".", String)
        ]
    }
    def get_tokens_unprocessed(self, text):
        """Re-tag '.' as Operator (LuaLexer yields it as Punctuation)."""
        # set . as Operator instead of Punctuation
        for index, token, value in LuaLexer.get_tokens_unprocessed(self, text):
            if token == Punctuation and value == ".":
                token = Operator
            yield index, token, value
class ChaiscriptLexer(RegexLexer):
    """
    For `ChaiScript <http://chaiscript.com/>`_ source code.

    .. versionadded:: 2.0
    """

    name = 'ChaiScript'
    aliases = ['chai', 'chaiscript']
    filenames = ['*.chai']
    mimetypes = ['text/x-chaiscript', 'application/x-chaiscript']

    flags = re.DOTALL | re.MULTILINE

    tokens = {
        'commentsandwhitespace': [
            (r'\s+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'^\#.*?\n', Comment.Single)
        ],
        'slashstartsregex': [
            # state entered after tokens that may be followed by a regex
            # literal (JavaScript-style disambiguation of '/')
            include('commentsandwhitespace'),
            (r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
             r'([gim]+\b|\B)', String.Regex, '#pop'),
            (r'(?=/)', Text, ('#pop', 'badregex')),
            default('#pop')
        ],
        'badregex': [
            (r'\n', Text, '#pop')
        ],
        'root': [
            include('commentsandwhitespace'),
            (r'\n', Text),
            (r'[^\S\n]+', Text),
            # BUG FIX: the original concatenated '...\.\.' directly onto
            # '(<<|...)=?' with no '|' between the two halves, which made
            # the pattern require '..' in front of every compound operator
            # and left '<<', '==', '!=', '&', '^', '%' etc. unmatched
            # (they lexed as Error).  The missing alternation bar is
            # restored here.
            (r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|\.\.|'
             r'(<<|>>>?|==?|!=?|[-<>+*%&|^/])=?', Operator, 'slashstartsregex'),
            (r'[{(\[;,]', Punctuation, 'slashstartsregex'),
            (r'[})\].]', Punctuation),
            (r'[=+\-*/]', Operator),
            (r'(for|in|while|do|break|return|continue|if|else|'
             r'throw|try|catch'
             r')\b', Keyword, 'slashstartsregex'),
            (r'(var)\b', Keyword.Declaration, 'slashstartsregex'),
            (r'(attr|def|fun)\b', Keyword.Reserved),
            (r'(true|false)\b', Keyword.Constant),
            (r'(eval|throw)\b', Name.Builtin),
            # `backtick-quoted` operator-function names
            (r'`\S+`', Name.Builtin),
            (r'[$a-zA-Z_]\w*', Name.Other),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'[0-9]+', Number.Integer),
            (r'"', String.Double, 'dqstring'),
            (r"'(\\\\|\\'|[^'])*'", String.Single),
        ],
        'dqstring': [
            # ${ ... } interpolation inside double-quoted strings
            (r'\$\{[^"}]+?\}', String.Interpol),
            (r'\$', String.Double),
            (r'\\\\', String.Double),
            (r'\\"', String.Double),
            (r'[^\\"$]+', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }
class LSLLexer(RegexLexer):
    """
    For Second Life's Linden Scripting Language source code.

    .. versionadded:: 2.0
    """

    name = 'LSL'
    aliases = ['lsl']
    filenames = ['*.lsl']
    mimetypes = ['text/x-lsl']

    flags = re.MULTILINE

    # Categorized word lists for LSL.  The very long patterns below were
    # broken across physical lines (a SyntaxError inside the raw-string
    # literals); they are re-joined here using implicit adjacent-literal
    # concatenation, which produces the identical runtime string.
    lsl_keywords = r'\b(?:do|else|for|if|jump|return|while)\b'
    lsl_types = r'\b(?:float|integer|key|list|quaternion|rotation|string|vector)\b'
    lsl_states = r'\b(?:(?:state)\s+\w+|default)\b'
    lsl_events = r'\b(?:state_(?:entry|exit)|touch(?:_(?:start|end))?|(?:land_)?collision(?:_(?:start|end))?|timer|listen|(?:no_)?sensor|control|(?:not_)?at_(?:rot_)?target|money|email|run_time_permissions|changed|attach|dataserver|moving_(?:start|end)|link_message|(?:on|object)_rez|remote_data|http_re(?:sponse|quest)|path_update|transaction_result)\b'
    lsl_functions_builtin = (
        r'\b(?:ll(?:ReturnObjectsBy(?:ID|Owner)|Json(?:2List|[GS]etValue|ValueType)|Sin|Cos|Tan|Atan2|Sqrt|Pow|Abs|Fabs|Frand|Floor|Ceil|Round|Vec(?:Mag|Norm|Dist)|Rot(?:Between|2(?:Euler|Fwd|Left|Up))|(?:Euler|Axes)2Rot|Whisper|(?:Region|Owner)?Say|Shout|Listen(?:Control|Remove)?|Sensor(?:Repeat|Remove)?|Detected(?:Name|Key|Owner|Type|Pos|Vel|Grab|Rot|Group|LinkNumber)|Die|Ground|Wind|(?:[GS]et)(?:AnimationOverride|MemoryLimit|PrimMediaParams|ParcelMusicURL|Object(?:Desc|Name)|PhysicsMaterial|Status|Scale|Color|Alpha|Texture|Pos|Rot|Force|Torque)|ResetAnimationOverride|(?:Scale|Offset|Rotate)Texture|(?:Rot)?Target(?:Remove)?|(?:Stop)?MoveToTarget|Apply(?:Rotational)?Impulse|Set(?:KeyframedMotion|ContentType|RegionPos|(?:Angular)?Velocity|Buoyancy|HoverHeight|ForceAndTorque|TimerEvent|ScriptState|Damage|TextureAnim|Sound(?:Queueing|Radius)|Vehicle(?:Type|(?:Float|Vector|Rotation)Param)|(?:Touch|Sit)?Text|Camera(?:Eye|At)Offset|PrimitiveParams|ClickAction|Link(?:Alpha|Color|PrimitiveParams(?:Fast)?|Texture(?:Anim)?|Camera|Media)|RemoteScriptAccessPin|PayPrice|LocalRot)|ScaleByFactor|Get(?:(?:Max|Min)ScaleFactor|ClosestNavPoint|StaticPath|SimStats|Env|PrimitiveParams|Link(?:PrimitiveParams|Number(?:OfSides)?|Key|Name|Media)|HTTPHeader|FreeURLs|Object(?:Details|PermMask|PrimCount)|Parcel(?:MaxPrims|Details|Prim(?:Count|Owners))|Attached|(?:SPMax|Free|Used)Memory|Region(?:Name|TimeDilation|FPS|Corner|AgentCount)|Root(?:Position|Rotation)|UnixTime|(?:Parcel|Region)Flags|(?:Wall|GMT)clock|SimulatorHostname|BoundingBox|GeometricCenter|Creator|NumberOf(?:Prims|NotecardLines|Sides)|Animation(?:List)?|(?:Camera|Local)(?:Pos|Rot)|Vel|Accel|Omega|Time(?:stamp|OfDay)|(?:Object|CenterOf)?Mass|MassMKS|Energy|Owner|(?:Owner)?Key|SunDirection|Texture(?:Offset|Scale|Rot)|Inventory(?:Number|Name|Key|Type|Creator|PermMask)|Permissions(?:Key)?|StartParameter|List(?:Length|EntryType)|Date|Agent(?:Size|Info|Language|List)|LandOwnerAt|NotecardLine|Script(?:Name|State))|(?:G'
        r'et|Reset|GetAndReset)Time|PlaySound(?:Slave)?|LoopSound(?:Master|Slave)?|(?:Trigger|Stop|Preload)Sound|(?:(?:Get|Delete)Sub|Insert)String|To(?:Upper|Lower)|Give(?:InventoryList|Money)|RezObject|(?:Stop)?LookAt|Sleep|CollisionFilter|(?:Take|Release)Controls|DetachFromAvatar|AttachToAvatar(?:Temp)?|InstantMessage|(?:GetNext)?Email|StopHover|MinEventDelay|RotLookAt|String(?:Length|Trim)|(?:Start|Stop)Animation|TargetOmega|RequestPermissions|(?:Create|Break)Link|BreakAllLinks|(?:Give|Remove)Inventory|Water|PassTouches|Request(?:Agent|Inventory)Data|TeleportAgent(?:Home|GlobalCoords)?|ModifyLand|CollisionSound|ResetScript|MessageLinked|PushObject|PassCollisions|AxisAngle2Rot|Rot2(?:Axis|Angle)|A(?:cos|sin)|AngleBetween|AllowInventoryDrop|SubStringIndex|List2(?:CSV|Integer|Json|Float|String|Key|Vector|Rot|List(?:Strided)?)|DeleteSubList|List(?:Statistics|Sort|Randomize|(?:Insert|Find|Replace)List)|EdgeOfWorld|AdjustSoundVolume|Key2Name|TriggerSoundLimited|EjectFromLand|(?:CSV|ParseString)2List|OverMyLand|SameGroup|UnSit|Ground(?:Slope|Normal|Contour)|GroundRepel|(?:Set|Remove)VehicleFlags|(?:AvatarOn)?(?:Link)?SitTarget|Script(?:Danger|Profiler)|Dialog|VolumeDetect|ResetOtherScript|RemoteLoadScriptPin|(?:Open|Close)RemoteDataChannel|SendRemoteData|RemoteDataReply|(?:Integer|String)ToBase64|XorBase64|Log(?:10)?|Base64To(?:String|Integer)|ParseStringKeepNulls|RezAtRoot|RequestSimulatorData|ForceMouselook|(?:Load|Release|(?:E|Une)scape)URL|ParcelMedia(?:CommandList|Query)|ModPow|MapDestination|(?:RemoveFrom|AddTo|Reset)Land(?:Pass|Ban)List|(?:Set|Clear)CameraParams|HTTP(?:Request|Response)|TextBox|DetectedTouch(?:UV|Face|Pos|(?:N|Bin)ormal|ST)|(?:MD5|SHA1|DumpList2)String|Request(?:Secure)?URL|Clear(?:Prim|Link)Media|(?:Link)?ParticleSystem|(?:Get|Request)(?:Username|DisplayName)|RegionSayTo|CastRay|GenerateKey|TransferLindenDollars|ManageEstateAccess|(?:Create|Delete)Character|ExecCharacterCmd|Evade|FleeFrom|NavigateTo|PatrolPoints|Pursue|UpdateCharacter|WanderWithin))\b')
    lsl_constants_float = r'\b(?:DEG_TO_RAD|PI(?:_BY_TWO)?|RAD_TO_DEG|SQRT2|TWO_PI)\b'
    lsl_constants_integer = (
        r'\b(?:JSON_APPEND|STATUS_(?:PHYSICS|ROTATE_[XYZ]|PHANTOM|SANDBOX|BLOCK_GRAB(?:_OBJECT)?|(?:DIE|RETURN)_AT_EDGE|CAST_SHADOWS|OK|MALFORMED_PARAMS|TYPE_MISMATCH|BOUNDS_ERROR|NOT_(?:FOUND|SUPPORTED)|INTERNAL_ERROR|WHITELIST_FAILED)|AGENT(?:_(?:BY_(?:LEGACY_|USER)NAME|FLYING|ATTACHMENTS|SCRIPTED|MOUSELOOK|SITTING|ON_OBJECT|AWAY|WALKING|IN_AIR|TYPING|CROUCHING|BUSY|ALWAYS_RUN|AUTOPILOT|LIST_(?:PARCEL(?:_OWNER)?|REGION)))?|CAMERA_(?:PITCH|DISTANCE|BEHINDNESS_(?:ANGLE|LAG)|(?:FOCUS|POSITION)(?:_(?:THRESHOLD|LOCKED|LAG))?|FOCUS_OFFSET|ACTIVE)|ANIM_ON|LOOP|REVERSE|PING_PONG|SMOOTH|ROTATE|SCALE|ALL_SIDES|LINK_(?:ROOT|SET|ALL_(?:OTHERS|CHILDREN)|THIS)|ACTIVE|PASSIVE|SCRIPTED|CONTROL_(?:FWD|BACK|(?:ROT_)?(?:LEFT|RIGHT)|UP|DOWN|(?:ML_)?LBUTTON)|PERMISSION_(?:RETURN_OBJECTS|DEBIT|OVERRIDE_ANIMATIONS|SILENT_ESTATE_MANAGEMENT|TAKE_CONTROLS|TRIGGER_ANIMATION|ATTACH|CHANGE_LINKS|(?:CONTROL|TRACK)_CAMERA|TELEPORT)|INVENTORY_(?:TEXTURE|SOUND|OBJECT|SCRIPT|LANDMARK|CLOTHING|NOTECARD|BODYPART|ANIMATION|GESTURE|ALL|NONE)|CHANGED_(?:INVENTORY|COLOR|SHAPE|SCALE|TEXTURE|LINK|ALLOWED_DROP|OWNER|REGION(?:_START)?|TELEPORT|MEDIA)|OBJECT_(?:(?:PHYSICS|SERVER|STREAMING)_COST|UNKNOWN_DETAIL|CHARACTER_TIME|PHANTOM|PHYSICS|TEMP_ON_REZ|NAME|DESC|POS|PRIM_EQUIVALENCE|RETURN_(?:PARCEL(?:_OWNER)?|REGION)|ROO?T|VELOCITY|OWNER|GROUP|CREATOR|ATTACHED_POINT|RENDER_WEIGHT|PATHFINDING_TYPE|(?:RUNNING|TOTAL)_SCRIPT_COUNT|SCRIPT_(?:MEMORY|TIME))|TYPE_(?:INTEGER|FLOAT|STRING|KEY|VECTOR|ROTATION|INVALID)|(?:DEBUG|PUBLIC)_CHANNEL|ATTACH_(?:AVATAR_CENTER|CHEST|HEAD|BACK|PELVIS|MOUTH|CHIN|NECK|NOSE|BELLY|[LR](?:SHOULDER|HAND|FOOT|EAR|EYE|[UL](?:ARM|LEG)|HIP)|(?:LEFT|RIGHT)_PEC|HUD_(?:CENTER_[12]|TOP_(?:RIGHT|CENTER|LEFT)|BOTTOM(?:_(?:RIGHT|LEFT))?))|LAND_(?:LEVEL|RAISE|LOWER|SMOOTH|NOISE|REVERT)|DATA_(?:ONLINE|NAME|BORN|SIM_(?:POS|STATUS|RATING)|PAYINFO)|PAYMENT_INFO_(?:ON_FILE|USED)|REMOTE_DATA_(?:CHANNEL|REQUEST|REPLY)|PSYS_(?:PART_(?:BF_(?:ZERO|ONE(?:_MINUS_(?:DEST_COLOR|SOURCE_(ALPHA|COLO'
        r'R)))?|DEST_COLOR|SOURCE_(ALPHA|COLOR))|BLEND_FUNC_(DEST|SOURCE)|FLAGS|(?:START|END)_(?:COLOR|ALPHA|SCALE|GLOW)|MAX_AGE|(?:RIBBON|WIND|INTERP_(?:COLOR|SCALE)|BOUNCE|FOLLOW_(?:SRC|VELOCITY)|TARGET_(?:POS|LINEAR)|EMISSIVE)_MASK)|SRC_(?:MAX_AGE|PATTERN|ANGLE_(?:BEGIN|END)|BURST_(?:RATE|PART_COUNT|RADIUS|SPEED_(?:MIN|MAX))|ACCEL|TEXTURE|TARGET_KEY|OMEGA|PATTERN_(?:DROP|EXPLODE|ANGLE(?:_CONE(?:_EMPTY)?)?)))|VEHICLE_(?:REFERENCE_FRAME|TYPE_(?:NONE|SLED|CAR|BOAT|AIRPLANE|BALLOON)|(?:LINEAR|ANGULAR)_(?:FRICTION_TIMESCALE|MOTOR_DIRECTION)|LINEAR_MOTOR_OFFSET|HOVER_(?:HEIGHT|EFFICIENCY|TIMESCALE)|BUOYANCY|(?:LINEAR|ANGULAR)_(?:DEFLECTION_(?:EFFICIENCY|TIMESCALE)|MOTOR_(?:DECAY_)?TIMESCALE)|VERTICAL_ATTRACTION_(?:EFFICIENCY|TIMESCALE)|BANKING_(?:EFFICIENCY|MIX|TIMESCALE)|FLAG_(?:NO_DEFLECTION_UP|LIMIT_(?:ROLL_ONLY|MOTOR_UP)|HOVER_(?:(?:WATER|TERRAIN|UP)_ONLY|GLOBAL_HEIGHT)|MOUSELOOK_(?:STEER|BANK)|CAMERA_DECOUPLED))|PRIM_(?:TYPE(?:_(?:BOX|CYLINDER|PRISM|SPHERE|TORUS|TUBE|RING|SCULPT))?|HOLE_(?:DEFAULT|CIRCLE|SQUARE|TRIANGLE)|MATERIAL(?:_(?:STONE|METAL|GLASS|WOOD|FLESH|PLASTIC|RUBBER))?|SHINY_(?:NONE|LOW|MEDIUM|HIGH)|BUMP_(?:NONE|BRIGHT|DARK|WOOD|BARK|BRICKS|CHECKER|CONCRETE|TILE|STONE|DISKS|GRAVEL|BLOBS|SIDING|LARGETILE|STUCCO|SUCTION|WEAVE)|TEXGEN_(?:DEFAULT|PLANAR)|SCULPT_(?:TYPE_(?:SPHERE|TORUS|PLANE|CYLINDER|MASK)|FLAG_(?:MIRROR|INVERT))|PHYSICS(?:_(?:SHAPE_(?:CONVEX|NONE|PRIM|TYPE)))?|(?:POS|ROT)_LOCAL|SLICE|TEXT|FLEXIBLE|POINT_LIGHT|TEMP_ON_REZ|PHANTOM|POSITION|SIZE|ROTATION|TEXTURE|NAME|OMEGA|DESC|LINK_TARGET|COLOR|BUMP_SHINY|FULLBRIGHT|TEXGEN|GLOW|MEDIA_(?:ALT_IMAGE_ENABLE|CONTROLS|(?:CURRENT|HOME)_URL|AUTO_(?:LOOP|PLAY|SCALE|ZOOM)|FIRST_CLICK_INTERACT|(?:WIDTH|HEIGHT)_PIXELS|WHITELIST(?:_ENABLE)?|PERMS_(?:INTERACT|CONTROL)|PARAM_MAX|CONTROLS_(?:STANDARD|MINI)|PERM_(?:NONE|OWNER|GROUP|ANYONE)|MAX_(?:URL_LENGTH|WHITELIST_(?:SIZE|COUNT)|(?:WIDTH|HEIGHT)_PIXELS)))|MASK_(?:BASE|OWNER|GROUP|EVERYONE|NEXT)|PERM_(?:TRANSFER|MODIFY|COPY|MOVE|ALL)|PARCEL_(?:MEDIA_COMMAND_(?:STOP'
        r'|PAUSE|PLAY|LOOP|TEXTURE|URL|TIME|AGENT|UNLOAD|AUTO_ALIGN|TYPE|SIZE|DESC|LOOP_SET)|FLAG_(?:ALLOW_(?:FLY|(?:GROUP_)?SCRIPTS|LANDMARK|TERRAFORM|DAMAGE|CREATE_(?:GROUP_)?OBJECTS)|USE_(?:ACCESS_(?:GROUP|LIST)|BAN_LIST|LAND_PASS_LIST)|LOCAL_SOUND_ONLY|RESTRICT_PUSHOBJECT|ALLOW_(?:GROUP|ALL)_OBJECT_ENTRY)|COUNT_(?:TOTAL|OWNER|GROUP|OTHER|SELECTED|TEMP)|DETAILS_(?:NAME|DESC|OWNER|GROUP|AREA|ID|SEE_AVATARS))|LIST_STAT_(?:MAX|MIN|MEAN|MEDIAN|STD_DEV|SUM(?:_SQUARES)?|NUM_COUNT|GEOMETRIC_MEAN|RANGE)|PAY_(?:HIDE|DEFAULT)|REGION_FLAG_(?:ALLOW_DAMAGE|FIXED_SUN|BLOCK_TERRAFORM|SANDBOX|DISABLE_(?:COLLISIONS|PHYSICS)|BLOCK_FLY|ALLOW_DIRECT_TELEPORT|RESTRICT_PUSHOBJECT)|HTTP_(?:METHOD|MIMETYPE|BODY_(?:MAXLENGTH|TRUNCATED)|CUSTOM_HEADER|PRAGMA_NO_CACHE|VERBOSE_THROTTLE|VERIFY_CERT)|STRING_(?:TRIM(?:_(?:HEAD|TAIL))?)|CLICK_ACTION_(?:NONE|TOUCH|SIT|BUY|PAY|OPEN(?:_MEDIA)?|PLAY|ZOOM)|TOUCH_INVALID_FACE|PROFILE_(?:NONE|SCRIPT_MEMORY)|RC_(?:DATA_FLAGS|DETECT_PHANTOM|GET_(?:LINK_NUM|NORMAL|ROOT_KEY)|MAX_HITS|REJECT_(?:TYPES|AGENTS|(?:NON)?PHYSICAL|LAND))|RCERR_(?:CAST_TIME_EXCEEDED|SIM_PERF_LOW|UNKNOWN)|ESTATE_ACCESS_(?:ALLOWED_(?:AGENT|GROUP)_(?:ADD|REMOVE)|BANNED_AGENT_(?:ADD|REMOVE))|DENSITY|FRICTION|RESTITUTION|GRAVITY_MULTIPLIER|KFM_(?:COMMAND|CMD_(?:PLAY|STOP|PAUSE|SET_MODE)|MODE|FORWARD|LOOP|PING_PONG|REVERSE|DATA|ROTATION|TRANSLATION)|ERR_(?:GENERIC|PARCEL_PERMISSIONS|MALFORMED_PARAMS|RUNTIME_PERMISSIONS|THROTTLED)|CHARACTER_(?:CMD_(?:(?:SMOOTH_)?STOP|JUMP)|DESIRED_(?:TURN_)?SPEED|RADIUS|STAY_WITHIN_PARCEL|LENGTH|ORIENTATION|ACCOUNT_FOR_SKIPPED_FRAMES|AVOIDANCE_MODE|TYPE(?:_(?:[A-D]|NONE))?|MAX_(?:DECEL|TURN_RADIUS|(?:ACCEL|SPEED)))|PURSUIT_(?:OFFSET|FUZZ_FACTOR|GOAL_TOLERANCE|INTERCEPT)|REQUIRE_LINE_OF_SIGHT|FORCE_DIRECT_PATH|VERTICAL|HORIZONTAL|AVOID_(?:CHARACTERS|DYNAMIC_OBSTACLES|NONE)|PU_(?:EVADE_(?:HIDDEN|SPOTTED)|FAILURE_(?:DYNAMIC_PATHFINDING_DISABLED|INVALID_(?:GOAL|START)|NO_(?:NAVMESH|VALID_DESTINATION)|OTHER|TARGET_GONE|(?:PARCEL_)?UNREACHABLE)|(?:GOAL|SLOWDOWN_DISTANCE)'
        r'_REACHED)|TRAVERSAL_TYPE(?:_(?:FAST|NONE|SLOW))?|CONTENT_TYPE_(?:ATOM|FORM|HTML|JSON|LLSD|RSS|TEXT|XHTML|XML)|GCNP_(?:RADIUS|STATIC)|(?:PATROL|WANDER)_PAUSE_AT_WAYPOINTS|OPT_(?:AVATAR|CHARACTER|EXCLUSION_VOLUME|LEGACY_LINKSET|MATERIAL_VOLUME|OTHER|STATIC_OBSTACLE|WALKABLE)|SIM_STAT_PCT_CHARS_STEPPED)\b')
    lsl_constants_integer_boolean = r'\b(?:FALSE|TRUE)\b'
    lsl_constants_rotation = r'\b(?:ZERO_ROTATION)\b'
    lsl_constants_string = r'\b(?:EOF|JSON_(?:ARRAY|DELETE|FALSE|INVALID|NULL|NUMBER|OBJECT|STRING|TRUE)|NULL_KEY|TEXTURE_(?:BLANK|DEFAULT|MEDIA|PLYWOOD|TRANSPARENT)|URL_REQUEST_(?:GRANTED|DENIED))\b'
    lsl_constants_vector = r'\b(?:TOUCH_INVALID_(?:TEXCOORD|VECTOR)|ZERO_VECTOR)\b'
    lsl_invalid_broken = r'\b(?:LAND_(?:LARGE|MEDIUM|SMALL)_BRUSH)\b'
    lsl_invalid_deprecated = r'\b(?:ATTACH_[LR]PEC|DATA_RATING|OBJECT_ATTACHMENT_(?:GEOMETRY_BYTES|SURFACE_AREA)|PRIM_(?:CAST_SHADOWS|MATERIAL_LIGHT|TYPE_LEGACY)|PSYS_SRC_(?:INNER|OUTER)ANGLE|VEHICLE_FLAG_NO_FLY_UP|ll(?:Cloud|Make(?:Explosion|Fountain|Smoke|Fire)|RemoteDataSetRegion|Sound(?:Preload)?|XorBase64Strings(?:Correct)?))\b'
    lsl_invalid_illegal = r'\b(?:event)\b'
    lsl_invalid_unimplemented = r'\b(?:CHARACTER_(?:MAX_ANGULAR_(?:ACCEL|SPEED)|TURN_SPEED_MULTIPLIER)|PERMISSION_(?:CHANGE_(?:JOINTS|PERMISSIONS)|RELEASE_OWNERSHIP|REMAP_CONTROLS)|PRIM_PHYSICS_MATERIAL|PSYS_SRC_OBJ_REL_MASK|ll(?:CollisionSprite|(?:Stop)?PointAt|(?:(?:Refresh|Set)Prim)URL|(?:Take|Release)Camera|RemoteLoadScript))\b'
    lsl_reserved_godmode = r'\b(?:ll(?:GodLikeRezObject|Set(?:Inventory|Object)PermMask))\b'
    lsl_reserved_log = r'\b(?:print)\b'
    lsl_operators = r'\+\+|\-\-|<<|>>|&&?|\|\|?|\^|~|[!%<>=*+\-/]=?'

    tokens = {
        'root': [
            (r'//.*?\n', Comment.Single),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String.Double, 'string'),
            # ordering matters: keywords and known identifiers must be
            # tried before the catch-all Name.Variable rule below
            (lsl_keywords, Keyword),
            (lsl_types, Keyword.Type),
            (lsl_states, Name.Class),
            (lsl_events, Name.Builtin),
            (lsl_functions_builtin, Name.Function),
            (lsl_constants_float, Keyword.Constant),
            (lsl_constants_integer, Keyword.Constant),
            (lsl_constants_integer_boolean, Keyword.Constant),
            (lsl_constants_rotation, Keyword.Constant),
            (lsl_constants_string, Keyword.Constant),
            (lsl_constants_vector, Keyword.Constant),
            (lsl_invalid_broken, Error),
            (lsl_invalid_deprecated, Error),
            (lsl_invalid_illegal, Error),
            (lsl_invalid_unimplemented, Error),
            (lsl_reserved_godmode, Keyword.Reserved),
            (lsl_reserved_log, Keyword.Reserved),
            (r'\b([a-zA-Z_]\w*)\b', Name.Variable),
            (r'(\d+\.\d*|\.\d+|\d+)[eE][+-]?\d*', Number.Float),
            (r'(\d+\.\d*|\.\d+)', Number.Float),
            (r'0[xX][0-9a-fA-F]+', Number.Hex),
            (r'\d+', Number.Integer),
            (lsl_operators, Operator),
            # ':' / ':=' is not valid LSL
            (r':=?', Error),
            (r'[,;{}()\[\]]', Punctuation),
            (r'\n+', Whitespace),
            (r'\s+', Whitespace)
        ],
        'comment': [
            # nested /* ... */ comments are handled via #push/#pop
            (r'[^*/]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'string': [
            (r'\\([nt"\\])', String.Escape),
            (r'"', String.Double, '#pop'),
            # any other escape is invalid in LSL
            (r'\\.', Error),
            (r'[^"\\]+', String.Double),
        ]
    }
class AppleScriptLexer(RegexLexer):
"""
For `AppleScript source code
<http://developer.apple.com/documentation/AppleScript/
Conceptual/AppleScriptLangGuide>`_,
including `AppleScript Studio
<http://developer.apple.com/documentation/AppleScript/
Reference/StudioReference>`_.
Contributed by Andreas Amann <[email protected]>.
.. versionadded:: 1.0
"""
name = 'AppleScript'
aliases = ['applescript']
filenames = ['*.applescript']
flags = re.MULTILINE | re.DOTALL
Identifiers = r'[a-zA-Z]\w*'
# XXX: use words() for all of these
Literals = ('AppleScript', 'current application', 'false', 'linefeed',
'missing value', 'pi', 'quote', 'result', 'return', 'space',
'tab', 'text item delimiters', 'true', 'version')
Classes = ('alias ', 'application ', 'boolean ', 'class ', 'constant ',
'date ', 'file ', 'integer ', 'list ', 'number ', 'POSIX file ',
'real ', 'record ', 'reference ', 'RGB color ', 'script ',
'text ', 'unit types', '(?:Unicode )?text', 'string')
BuiltIn = ('attachment', 'attribute run', 'character', 'day', 'month',
'paragraph', 'word', 'year')
HandlerParams = ('about', 'above', 'against', 'apart from', 'around',
'aside from', 'at', 'below', 'beneath', 'beside',
'between', 'for', 'given', 'instead of', 'on', 'onto',
'out of', 'over', 'since')
Commands = ('ASCII (character|number)', 'activate', 'beep', 'choose URL',
'choose application', 'choose color', 'choose file( name)?',
'choose folder', 'choose from list',
'choose remote application', 'clipboard info',
'close( access)?', 'copy', 'count', 'current date', 'delay',
'delete', 'display (alert|dialog)', 'do shell script',
'duplicate', 'exists', 'get eof', 'get volume settings',
'info for', 'launch', 'list (disks|folder)', 'load script',
'log', 'make', 'mount volume', 'new', 'offset',
'open( (for access|location))?', 'path to', 'print', 'quit',
'random number', 'read', 'round', 'run( script)?',
'say', 'scripting components',
'set (eof|the clipboard to|volume)', 'store script',
'summarize', 'system attribute', 'system info',
'the clipboard', 'time to GMT', 'write', 'quoted form')
References = ('(in )?back of', '(in )?front of', '[0-9]+(st|nd|rd|th)',
'first', 'second', 'third', 'fourth', 'fifth', 'sixth',
'seventh', 'eighth', 'ninth', 'tenth', 'after', 'back',
'before', 'behind', 'every', 'front', 'index', 'last',
'middle', 'some', 'that', 'through', 'thru', 'where', 'whose')
Operators = ("and", "or", "is equal", "equals", "(is )?equal to", "is not",
"isn't", "isn't equal( to)?", "is not equal( to)?",
"doesn't equal", "does not equal", "(is )?greater than",
"comes after", "is not less than or equal( to)?",
"isn't less than or equal( to)?", "(is )?less than",
"comes before", "is not greater than or equal( to)?",
"isn't greater than or equal( to)?",
"(is )?greater than or equal( to)?", "is not less than",
"isn't less than", "does not come before",
"doesn't come before", "(is )?less than or equal( to)?",
"is not greater than", "isn't greater than",
"does not come after", "doesn't come after", "starts? with",
"begins? with", "ends? with", "contains?", "does not contain",
"doesn't contain", "is in", "is contained by", "is not in",
"is not contained by", "isn't contained by", "div", "mod",
"not", "(a )?(ref( to)?|reference to)", "is", "does")
Control = ('considering', 'else', 'error', 'exit', 'from', 'if',
'ignoring', 'in', 'repeat', 'tell', 'then', 'times', 'to',
'try', 'until', 'using terms from', 'while', 'whith',
'with timeout( of)?', 'with transaction', 'by', 'continue',
'end', 'its?', 'me', 'my', 'return', 'of', 'as')
Declarations = ('global', 'local', 'prop(erty)?', 'set', 'get')
Reserved = ('but', 'put', 'returning', 'the')
StudioClasses = ('action cell', 'alert reply', 'application', 'box',
'browser( cell)?', 'bundle', 'button( cell)?', 'cell',
'clip view', 'color well', 'color-panel',
'combo box( item)?', 'control',
'data( (cell|column|item|row|source))?', 'default entry',
'dialog reply', 'document', 'drag info', 'drawer',
'event', 'font(-panel)?', 'formatter',
'image( (cell|view))?', 'matrix', 'menu( item)?', 'item',
'movie( view)?', 'open-panel', 'outline view', 'panel',
'pasteboard', 'plugin', 'popup button',
'progress indicator', 'responder', 'save-panel',
'scroll view', 'secure text field( cell)?', 'slider',
'sound', 'split view', 'stepper', 'tab view( item)?',
'table( (column|header cell|header view|view))',
'text( (field( cell)?|view))?', 'toolbar( item)?',
'user-defaults', 'view', 'window')
StudioEvents = ('accept outline drop', 'accept table drop', 'action',
'activated', 'alert ended', 'awake from nib', 'became key',
'became main', 'begin editing', 'bounds changed',
'cell value', 'cell value changed', 'change cell value',
'change item value', 'changed', 'child of item',
'choose menu item', 'clicked', 'clicked toolbar item',
'closed', 'column clicked', 'column moved',
'column resized', 'conclude drop', 'data representation',
'deminiaturized', 'dialog ended', 'document nib name',
'double clicked', 'drag( (entered|exited|updated))?',
'drop', 'end editing', 'exposed', 'idle', 'item expandable',
'item value', 'item value changed', 'items changed',
'keyboard down', 'keyboard up', 'launched',
'load data representation', 'miniaturized', 'mouse down',
'mouse dragged', 'mouse entered', 'mouse exited',
'mouse moved', 'mouse up', 'moved',
'number of browser rows', 'number of items',
'number of rows', 'open untitled', 'opened', 'panel ended',
'parameters updated', 'plugin loaded', 'prepare drop',
'prepare outline drag', 'prepare outline drop',
'prepare table drag', 'prepare table drop',
'read from file', 'resigned active', 'resigned key',
'resigned main', 'resized( sub views)?',
'right mouse down', 'right mouse dragged',
'right mouse up', 'rows changed', 'scroll wheel',
'selected tab view item', 'selection changed',
'selection changing', 'should begin editing',
'should close', 'should collapse item',
'should end editing', 'should expand item',
'should open( untitled)?',
'should quit( after last window closed)?',
'should select column', 'should select item',
'should select row', 'should select tab view item',
'should selection change', 'should zoom', 'shown',
'update menu item', 'update parameters',
'update toolbar item', 'was hidden', 'was miniaturized',
'will become active', 'will close', 'will dismiss',
'will display browser cell', 'will display cell',
'will display item cell', 'will display outline cell',
'will finish launching', 'will hide', 'will miniaturize',
'will move', 'will open', 'will pop up', 'will quit',
'will resign active', 'will resize( sub views)?',
'will select tab view item', 'will show', 'will zoom',
'write to file', 'zoomed')
StudioCommands = ('animate', 'append', 'call method', 'center',
'close drawer', 'close panel', 'display',
'display alert', 'display dialog', 'display panel', 'go',
'hide', 'highlight', 'increment', 'item for',
'load image', 'load movie', 'load nib', 'load panel',
'load sound', 'localized string', 'lock focus', 'log',
'open drawer', 'path for', 'pause', 'perform action',
'play', 'register', 'resume', 'scroll', 'select( all)?',
'show', 'size to fit', 'start', 'step back',
'step forward', 'stop', 'synchronize', 'unlock focus',
'update')
StudioProperties = ('accepts arrow key', 'action method', 'active',
'alignment', 'allowed identifiers',
'allows branch selection', 'allows column reordering',
'allows column resizing', 'allows column selection',
'allows customization',
'allows editing text attributes',
'allows empty selection', 'allows mixed state',
'allows multiple selection', 'allows reordering',
'allows undo', 'alpha( value)?', 'alternate image',
'alternate increment value', 'alternate title',
'animation delay', 'associated file name',
'associated object', 'auto completes', 'auto display',
'auto enables items', 'auto repeat',
'auto resizes( outline column)?',
'auto save expanded items', 'auto save name',
'auto save table columns', 'auto saves configuration',
'auto scroll', 'auto sizes all columns to fit',
'auto sizes cells', 'background color', 'bezel state',
'bezel style', 'bezeled', 'border rect', 'border type',
'bordered', 'bounds( rotation)?', 'box type',
'button returned', 'button type',
'can choose directories', 'can choose files',
'can draw', 'can hide',
'cell( (background color|size|type))?', 'characters',
'class', 'click count', 'clicked( data)? column',
'clicked data item', 'clicked( data)? row',
'closeable', 'collating', 'color( (mode|panel))',
'command key down', 'configuration',
'content(s| (size|view( margins)?))?', 'context',
'continuous', 'control key down', 'control size',
'control tint', 'control view',
'controller visible', 'coordinate system',
'copies( on scroll)?', 'corner view', 'current cell',
'current column', 'current( field)? editor',
'current( menu)? item', 'current row',
'current tab view item', 'data source',
'default identifiers', 'delta (x|y|z)',
'destination window', 'directory', 'display mode',
'displayed cell', 'document( (edited|rect|view))?',
'double value', 'dragged column', 'dragged distance',
'dragged items', 'draws( cell)? background',
'draws grid', 'dynamically scrolls', 'echos bullets',
'edge', 'editable', 'edited( data)? column',
'edited data item', 'edited( data)? row', 'enabled',
'enclosing scroll view', 'ending page',
'error handling', 'event number', 'event type',
'excluded from windows menu', 'executable path',
'expanded', 'fax number', 'field editor', 'file kind',
'file name', 'file type', 'first responder',
'first visible column', 'flipped', 'floating',
'font( panel)?', 'formatter', 'frameworks path',
'frontmost', 'gave up', 'grid color', 'has data items',
'has horizontal ruler', 'has horizontal scroller',
'has parent data item', 'has resize indicator',
'has shadow', 'has sub menu', 'has vertical ruler',
'has vertical scroller', 'header cell', 'header view',
'hidden', 'hides when deactivated', 'highlights by',
'horizontal line scroll', 'horizontal page scroll',
'horizontal ruler view', 'horizontally resizable',
'icon image', 'id', 'identifier',
'ignores multiple clicks',
'image( (alignment|dims when disabled|frame style|scaling))?',
'imports graphics', 'increment value',
'indentation per level', 'indeterminate', 'index',
'integer value', 'intercell spacing', 'item height',
'key( (code|equivalent( modifier)?|window))?',
'knob thickness', 'label', 'last( visible)? column',
'leading offset', 'leaf', 'level', 'line scroll',
'loaded', 'localized sort', 'location', 'loop mode',
'main( (bunde|menu|window))?', 'marker follows cell',
'matrix mode', 'maximum( content)? size',
'maximum visible columns',
'menu( form representation)?', 'miniaturizable',
'miniaturized', 'minimized image', 'minimized title',
'minimum column width', 'minimum( content)? size',
'modal', 'modified', 'mouse down state',
'movie( (controller|file|rect))?', 'muted', 'name',
'needs display', 'next state', 'next text',
'number of tick marks', 'only tick mark values',
'opaque', 'open panel', 'option key down',
'outline table column', 'page scroll', 'pages across',
'pages down', 'palette label', 'pane splitter',
'parent data item', 'parent window', 'pasteboard',
'path( (names|separator))?', 'playing',
'plays every frame', 'plays selection only', 'position',
'preferred edge', 'preferred type', 'pressure',
'previous text', 'prompt', 'properties',
'prototype cell', 'pulls down', 'rate',
'released when closed', 'repeated',
'requested print time', 'required file type',
'resizable', 'resized column', 'resource path',
'returns records', 'reuses columns', 'rich text',
'roll over', 'row height', 'rulers visible',
'save panel', 'scripts path', 'scrollable',
'selectable( identifiers)?', 'selected cell',
'selected( data)? columns?', 'selected data items?',
'selected( data)? rows?', 'selected item identifier',
'selection by rect', 'send action on arrow key',
'sends action when done editing', 'separates columns',
'separator item', 'sequence number', 'services menu',
'shared frameworks path', 'shared support path',
'sheet', 'shift key down', 'shows alpha',
'shows state by', 'size( mode)?',
'smart insert delete enabled', 'sort case sensitivity',
'sort column', 'sort order', 'sort type',
'sorted( data rows)?', 'sound', 'source( mask)?',
'spell checking enabled', 'starting page', 'state',
'string value', 'sub menu', 'super menu', 'super view',
'tab key traverses cells', 'tab state', 'tab type',
'tab view', 'table view', 'tag', 'target( printer)?',
'text color', 'text container insert',
'text container origin', 'text returned',
'tick mark position', 'time stamp',
'title(d| (cell|font|height|position|rect))?',
'tool tip', 'toolbar', 'trailing offset', 'transparent',
'treat packages as directories', 'truncated labels',
'types', 'unmodified characters', 'update views',
'use sort indicator', 'user defaults',
'uses data source', 'uses ruler',
'uses threaded animation',
'uses title from previous column', 'value wraps',
'version',
'vertical( (line scroll|page scroll|ruler view))?',
'vertically resizable', 'view',
'visible( document rect)?', 'volume', 'width', 'window',
'windows menu', 'wraps', 'zoomable', 'zoomed')
tokens = {
'root': [
(r'\s+', Text),
(u'¬\\n', String.Escape),
(r"'s\s+", Text), # This is a possessive, consider moving
(r'(--|#).*?$', Comment),
(r'\(\*', Comment.Multiline, 'comment'),
(r'[(){}!,.:]', Punctuation),
(u'(«)([^»]+)(»)',
bygroups(Text, Name.Builtin, Text)),
(r'\b((?:considering|ignoring)\s*)'
r'(application responses|case|diacriticals|hyphens|'
r'numeric strings|punctuation|white space)',
bygroups(Keyword, Name.Builtin)),
(u'(-|\\*|\\+|&|≠|>=?|<=?|=|≥|≤|/|÷|\\^)', Operator),
(r"\b(%s)\b" % '|'.join(Operators), Operator.Word),
(r'^(\s*(?:on|end)\s+)'
r'(%s)' % '|'.join(StudioEvents[::-1]),
bygroups(Keyword, Name.Function)),
(r'^(\s*)(in|on|script|to)(\s+)', bygroups(Text, Keyword, Text)),
(r'\b(as )(%s)\b' % '|'.join(Classes),
bygroups(Keyword, Name.Class)),
(r'\b(%s)\b' % '|'.join(Literals), Name.Constant),
(r'\b(%s)\b' % '|'.join(Commands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(Control), Keyword),
(r'\b(%s)\b' % '|'.join(Declarations), Keyword),
(r'\b(%s)\b' % '|'.join(Reserved), Name.Builtin),
(r'\b(%s)s?\b' % '|'.join(BuiltIn), Name.Builtin),
(r'\b(%s)\b' % '|'.join(HandlerParams), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioProperties), Name.Attribute),
(r'\b(%s)s?\b' % '|'.join(StudioClasses), Name.Builtin),
(r'\b(%s)\b' % '|'.join(StudioCommands), Name.Builtin),
(r'\b(%s)\b' % '|'.join(References), Name.Builtin),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r'\b(%s)\b' % Identifiers, Name.Variable),
(r'[-+]?(\d+\.\d*|\d*\.\d+)(E[-+][0-9]+)?', Number.Float),
(r'[-+]?\d+', Number.Integer),
],
'comment': [
('\(\*', Comment.Multiline, '#push'),
('\*\)', Comment.Multiline, '#pop'),
('[^*(]+', Comment.Multiline),
('[*(]', Comment.Multiline),
],
}
class RexxLexer(RegexLexer):
    """
    `Rexx <http://www.rexxinfo.org/>`_ is a scripting language available for
    a wide range of different platforms with its roots found on mainframe
    systems. It is popular for I/O- and data based tasks and can act as glue
    language to bind different applications together.
    .. versionadded:: 2.0
    """
    name = 'Rexx'
    aliases = ['rexx', 'arexx']
    filenames = ['*.rexx', '*.rex', '*.rx', '*.arexx']
    mimetypes = ['text/x-rexx']
    # Rexx keywords and identifiers are case-insensitive.
    flags = re.IGNORECASE
    tokens = {
        'root': [
            (r'\s', Whitespace),
            (r'/\*', Comment.Multiline, 'comment'),
            (r'"', String, 'string_double'),
            (r"'", String, 'string_single'),
            (r'[0-9]+(\.[0-9]+)?(e[+-]?[0-9])?', Number),
            # A label immediately followed by PROCEDURE declares a function.
            (r'([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b',
             bygroups(Name.Function, Whitespace, Operator, Whitespace,
                      Keyword.Declaration)),
            # Any other label.
            (r'([a-z_]\w*)(\s*)(:)',
             bygroups(Name.Label, Whitespace, Operator)),
            include('function'),
            include('keyword'),
            include('operator'),
            (r'[a-z_]\w*', Text),
        ],
        'function': [
            # Built-in functions are only highlighted when followed by an
            # opening parenthesis, to avoid clashing with plain variables.
            (words((
                'abbrev', 'abs', 'address', 'arg', 'b2x', 'bitand', 'bitor', 'bitxor',
                'c2d', 'c2x', 'center', 'charin', 'charout', 'chars', 'compare',
                'condition', 'copies', 'd2c', 'd2x', 'datatype', 'date', 'delstr',
                'delword', 'digits', 'errortext', 'form', 'format', 'fuzz', 'insert',
                'lastpos', 'left', 'length', 'linein', 'lineout', 'lines', 'max',
                'min', 'overlay', 'pos', 'queued', 'random', 'reverse', 'right', 'sign',
                'sourceline', 'space', 'stream', 'strip', 'substr', 'subword', 'symbol',
                'time', 'trace', 'translate', 'trunc', 'value', 'verify', 'word',
                'wordindex', 'wordlength', 'wordpos', 'words', 'x2b', 'x2c', 'x2d',
                'xrange'), suffix=r'(\s*)(\()'),
             bygroups(Name.Builtin, Whitespace, Operator)),
        ],
        'keyword': [
            (r'(address|arg|by|call|do|drop|else|end|exit|for|forever|if|'
             r'interpret|iterate|leave|nop|numeric|off|on|options|parse|'
             r'pull|push|queue|return|say|select|signal|to|then|trace|until|'
             r'while)\b', Keyword.Reserved),
        ],
        'operator': [
            # Includes the EBCDIC "not" sign (U+00AC) variants.
            (r'(-|//|/|\(|\)|\*\*|\*|\\<<|\\<|\\==|\\=|\\>>|\\>|\\|\|\||\||'
             r'&&|&|%|\+|<<=|<<|<=|<>|<|==|=|><|>=|>>=|>>|>|¬<<|¬<|¬==|¬=|'
             r'¬>>|¬>|¬|\.|,)', Operator),
        ],
        'string_double': [
            (r'[^"\n]+', String),
            (r'""', String),   # doubled quote escapes a quote
            (r'"', String, '#pop'),
            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
        ],
        'string_single': [
            # Consume runs of characters in one token (the old pattern
            # lacked the '+' and emitted one token per character,
            # inconsistent with 'string_double').
            (r'[^\'\n]+', String),
            (r'\'\'', String),  # doubled quote escapes a quote
            (r'\'', String, '#pop'),
            (r'\n', Text, '#pop'),  # Stray linefeed also terminates strings.
        ],
        'comment': [
            (r'[^*]+', Comment.Multiline),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'\*', Comment.Multiline),
        ]
    }
    _c = lambda s: re.compile(s, re.MULTILINE)
    _ADDRESS_COMMAND_PATTERN = _c(r'^\s*address\s+command\b')
    _ADDRESS_PATTERN = _c(r'^\s*address\s+')
    _DO_WHILE_PATTERN = _c(r'^\s*do\s+while\b')
    _IF_THEN_DO_PATTERN = _c(r'^\s*if\b.+\bthen\s+do\s*$')
    _PROCEDURE_PATTERN = _c(r'^\s*([a-z_]\w*)(\s*)(:)(\s*)(procedure)\b')
    _ELSE_DO_PATTERN = _c(r'\belse\s+do\s*$')
    _PARSE_ARG_PATTERN = _c(r'^\s*parse\s+(upper\s+)?(arg|value)\b')
    # Each (pattern, weight) pair contributes its weight to the overall
    # likelihood reported by analyse_text() when the pattern occurs.
    PATTERNS_AND_WEIGHTS = (
        (_ADDRESS_COMMAND_PATTERN, 0.2),
        (_ADDRESS_PATTERN, 0.05),
        (_DO_WHILE_PATTERN, 0.1),
        (_ELSE_DO_PATTERN, 0.1),
        (_IF_THEN_DO_PATTERN, 0.1),
        (_PROCEDURE_PATTERN, 0.5),
        (_PARSE_ARG_PATTERN, 0.2),
    )
    def analyse_text(text):
        """
        Check for initial comment and patterns that distinguish Rexx from
        other C-like languages.  Returns a likelihood between 0.0 and 1.0.
        """
        if re.search(r'/\*\**\s*rexx', text, re.IGNORECASE):
            # Header matches MVS Rexx requirements, this is certainly a Rexx
            # script.
            return 1.0
        elif text.startswith('/*'):
            # Header matches general Rexx requirements; the source code might
            # still be any language using C comments such as C++, C# or Java.
            lowerText = text.lower()
            result = sum(weight
                         for (pattern, weight) in RexxLexer.PATTERNS_AND_WEIGHTS
                         if pattern.search(lowerText)) + 0.01
            return min(result, 1.0)
        # No C-style comment header at all: certainly not Rexx.  Was an
        # implicit ``return None``; an explicit float keeps the contract
        # uniform for callers comparing scores.
        return 0.0
class MOOCodeLexer(RegexLexer):
    """
    For `MOOCode <http://www.moo.mud.org/>`_ (the MOO scripting
    language).
    .. versionadded:: 0.9
    """
    name = 'MOOCode'
    filenames = ['*.moo']
    aliases = ['moocode', 'moo']
    mimetypes = ['text/x-moocode']
    # Single 'root' state; rule order matters because the catch-all
    # identifier rule at the bottom would otherwise shadow keywords.
    tokens = {
        'root': [
            # Numbers
            (r'(0|[1-9][0-9_]*)', Number.Integer),
            # Strings
            (r'"(\\\\|\\"|[^"])*"', String),
            # exceptions
            (r'(E_PERM|E_DIV)', Name.Exception),
            # db-refs (object numbers like #123 and $corified names)
            (r'((#[-0-9]+)|(\$\w+))', Name.Entity),
            # Keywords
            (r'\b(if|else|elseif|endif|for|endfor|fork|endfork|while'
             r'|endwhile|break|continue|return|try'
             r'|except|endtry|finally|in)\b', Keyword),
            # builtins
            (r'(random|length)', Name.Builtin),
            # special variables
            (r'(player|caller|this|args)', Name.Variable.Instance),
            # skip whitespace
            (r'\s+', Text),
            (r'\n', Text),
            # other operators
            (r'([!;=,{}&|:.\[\]@()<>?]+)', Operator),
            # function call: highlight the name, not the parenthesis
            (r'(\w+)(\()', bygroups(Name.Function, Operator)),
            # variables (catch-all; must stay last)
            (r'(\w+)', Text),
        ]
    }
class HybrisLexer(RegexLexer):
    """
    For `Hybris <http://www.hybris-lang.org>`_ source code.
    .. versionadded:: 1.4
    """
    name = 'Hybris'
    aliases = ['hybris', 'hy']
    filenames = ['*.hy', '*.hyb']
    mimetypes = ['text/x-hybris', 'application/x-hybris']
    # DOTALL so that /* ... */ comments may span multiple lines.
    flags = re.MULTILINE | re.DOTALL
    tokens = {
        'root': [
            # method names: 'function'/'method'/'operator' keyword(s)
            # followed by an identifier and an opening parenthesis
            (r'^(\s*(?:function|method|operator\s+)+?)'
             r'([a-zA-Z_]\w*)'
             r'(\s*)(\()', bygroups(Keyword, Name.Function, Text, Operator)),
            (r'[^\S\n]+', Text),
            (r'//.*?\n', Comment.Single),
            (r'/\*.*?\*/', Comment.Multiline),
            (r'@[a-zA-Z_][\w.]*', Name.Decorator),
            # flow-control and statement keywords
            (r'(break|case|catch|next|default|do|else|finally|for|foreach|of|'
             r'unless|if|new|return|switch|me|throw|try|while)\b', Keyword),
            # declaration modifiers
            (r'(extends|private|protected|public|static|throws|function|method|'
             r'operator)\b', Keyword.Declaration),
            # literal constants and predefined macros
            (r'(true|false|null|__FILE__|__LINE__|__VERSION__|__LIB_PATH__|'
             r'__INC_PATH__)\b', Keyword.Constant),
            # class/struct definitions hand over to the 'class' state
            (r'(class|struct)(\s+)',
             bygroups(Keyword.Declaration, Text), 'class'),
            (r'(import|include)(\s+)',
             bygroups(Keyword.Namespace, Text), 'import'),
            # the Hybris standard-library function names
            (words((
                'gc_collect', 'gc_mm_items', 'gc_mm_usage', 'gc_collect_threshold',
                'urlencode', 'urldecode', 'base64encode', 'base64decode', 'sha1', 'crc32', 'sha2',
                'md5', 'md5_file', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'exp',
                'fabs', 'floor', 'fmod', 'log', 'log10', 'pow', 'sin', 'sinh', 'sqrt', 'tan', 'tanh',
                'isint', 'isfloat', 'ischar', 'isstring', 'isarray', 'ismap', 'isalias', 'typeof',
                'sizeof', 'toint', 'tostring', 'fromxml', 'toxml', 'binary', 'pack', 'load', 'eval',
                'var_names', 'var_values', 'user_functions', 'dyn_functions', 'methods', 'call',
                'call_method', 'mknod', 'mkfifo', 'mount', 'umount2', 'umount', 'ticks', 'usleep',
                'sleep', 'time', 'strtime', 'strdate', 'dllopen', 'dlllink', 'dllcall', 'dllcall_argv',
                'dllclose', 'env', 'exec', 'fork', 'getpid', 'wait', 'popen', 'pclose', 'exit', 'kill',
                'pthread_create', 'pthread_create_argv', 'pthread_exit', 'pthread_join', 'pthread_kill',
                'smtp_send', 'http_get', 'http_post', 'http_download', 'socket', 'bind', 'listen',
                'accept', 'getsockname', 'getpeername', 'settimeout', 'connect', 'server', 'recv',
                'send', 'close', 'print', 'println', 'printf', 'input', 'readline', 'serial_open',
                'serial_fcntl', 'serial_get_attr', 'serial_get_ispeed', 'serial_get_ospeed',
                'serial_set_attr', 'serial_set_ispeed', 'serial_set_ospeed', 'serial_write',
                'serial_read', 'serial_close', 'xml_load', 'xml_parse', 'fopen', 'fseek', 'ftell',
                'fsize', 'fread', 'fwrite', 'fgets', 'fclose', 'file', 'readdir', 'pcre_replace', 'size',
                'pop', 'unmap', 'has', 'keys', 'values', 'length', 'find', 'substr', 'replace', 'split',
                'trim', 'remove', 'contains', 'join'), suffix=r'\b'),
             Name.Builtin),
            # built-in types
            (words((
                'MethodReference', 'Runner', 'Dll', 'Thread', 'Pipe', 'Process',
                'Runnable', 'CGI', 'ClientSocket', 'Socket', 'ServerSocket',
                'File', 'Console', 'Directory', 'Exception'), suffix=r'\b'),
             Keyword.Type),
            (r'"(\\\\|\\"|[^"])*"', String),
            (r"'\\.'|'[^\\]'|'\\u[0-9a-f]{4}'", String.Char),
            # attribute access after a dot
            (r'(\.)([a-zA-Z_]\w*)',
             bygroups(Operator, Name.Attribute)),
            (r'[a-zA-Z_]\w*:', Name.Label),
            (r'[a-zA-Z_$]\w*', Name),
            (r'[~^*!%&\[\](){}<>|+=:;,./?\-@]+', Operator),
            (r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
            (r'0x[0-9a-f]+', Number.Hex),
            (r'[0-9]+L?', Number.Integer),
            (r'\n', Text),
        ],
        # single-token helper states entered from 'root'
        'class': [
            (r'[a-zA-Z_]\w*', Name.Class, '#pop')
        ],
        'import': [
            (r'[\w.]+\*?', Name.Namespace, '#pop')
        ],
    }
| [
"[email protected]"
] | |
495e3c3956b6601de7ec38f5589268de8a90e8f0 | be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1 | /DaVinciDev_v38r1p1/InstallArea/x86_64-slc6-gcc49-opt/python/StrippingArchive/Stripping15/StrippingDiMuonNew.py | b2c1acc2a142435c53e815e58b509aa5621c7df9 | [] | no_license | Sally27/backup_cmtuser_full | 34782102ed23c6335c48650a6eaa901137355d00 | 8924bebb935b96d438ce85b384cfc132d9af90f6 | refs/heads/master | 2020-05-21T09:27:04.370765 | 2018-12-12T14:41:07 | 2018-12-12T14:41:07 | 185,989,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,139 | py | '''
Inclusive DiMuon lines based on the lines by Gaia Lanfranchi, Alessio Sarti,
with inputs from Joel Bressieux, Giulia Manca, Matthew Needham and Patrick Robbe.
Including the following lines:
1. DiMuonLine
FullDST (keep 10%) and MicroDST
2. DiMuonSameSignLine
FullDST (keep 1%) and MicroDST (keep 10%)
3. DiMuonExclusiveLine
Selection is the same as DiMuonLine at present,
a cut of "1mm upstream of any PV" applied further
4. DiMuonNoPVLine
Using the same selection as DiMuonExclusiveLine at present (except the PV cut)
   Requiring no reconstructed PV.
5. DiMuonHighMassLine
6. DiMuonHighMassSameSignLine
7. DiMuonLowMassLine
Keep Hlt2UnbiasedDiMuonLowMassDecision triggered events
8. Jpsi2MuMuLine
9. Psi2MuMuLine
10. DiMuonDetachedLine
11. Jpsi2MuMuDetachedLine
More details can be found here:
http://indico.cern.ch/contributionDisplay.py?contribId=2&confId=100755
--------------------------
To include lines for DiMuon stream
--------------------------
from StrippingSelections.StrippingDiMuonNew import DiMuonConf
from StrippingSelections.StrippingDiMuonNew import config_default as config_FullDSTDiMuon
FullDSTDiMuonConf = DiMuonConf( name = None, config =config_FullDSTDiMuon )
stream.appendLines( FullDSTDiMuonConf.lines() )
--------------------------
For MicroDST
--------------------------
from StrippingSelections.StrippingDiMuonNew import DiMuonConf
from StrippingSelections.StrippingDiMuonNew import config_microDST as MicroDSTDiMuon
MicroDSTDiMuonConf = DiMuonConf( name = 'MicroDST', config = MicroDSTDiMuon )
stream.appendLines( MicroDSTDiMuonConf.lines() )
'''
__author__ = ['Jibo He']
__date__ = '30/09/2010'
__version__ = '$Revision: 1.0 $'

# NOTE: the trailing comma is required.  Without it the parentheses are
# plain grouping and __all__ would be the *string* 'DiMuonConf', so
# ``from ... import *`` would try to import each character of the name.
__all__ = (
    'DiMuonConf',
    )
# Cut and prescale configuration for the full-DST DiMuon stream.
# Conventions (see the inline notes):
#   * momenta / transverse momenta / masses are in MeV, distances in mm
#   * a negative threshold (e.g. MuonP = -8000.) effectively disables
#     the corresponding cut
#   * *_Prescale / *_Postscale give the fraction of events kept before /
#     after the selection runs
config_default= {
    'MicroDST'                             : False  ,  # full-DST settings (cf. config_microDST)
    # DiMuon line
    'DiMuon_Prescale'                      : 1.     ,
    'DiMuon_Postscale'                     : 1.     ,
    'DiMuon_checkPV'                       : False  ,
    'DiMuon_MuonPT'                        : 650.   ,  # MeV
    'DiMuon_MuonP'                         : -8000. ,  # MeV, no cut now
    'DiMuon_MuonTRCHI2DOF'                 : 5.     ,
    'DiMuon_MinMass'                       : 2900.  ,  # MeV
    'DiMuon_VCHI2PDOF'                     : 20.    ,
    'DiMuon_PT'                            : 3000.  ,  # MeV, no cut now
    # DiMuon Same Sign line
    'DiMuonSameSign_Prescale'              : 0.05   ,
    'DiMuonSameSign_Postscale'             : 1.     ,
    'DiMuonSameSign_checkPV'               : False  ,
    # DiMuonPrescaled line
    'DiMuonPrescaled_Prescale'             : 0.1    ,
    'DiMuonPrescaled_Postscale'            : 1.     ,
    'DiMuonPrescaled_checkPV'              : False  ,
    'DiMuonPrescaled_MuonPT'               : 650.   ,  # MeV
    'DiMuonPrescaled_MuonP'                : -8000. ,  # MeV, no cut now
    'DiMuonPrescaled_MuonTRCHI2DOF'        : 5.     ,
    'DiMuonPrescaled_MinMass'              : 2900.  ,  # MeV
    'DiMuonPrescaled_VCHI2PDOF'            : 20.    ,
    'DiMuonPrescaled_PT'                   : -1000. ,  # MeV, no cut now
    # DiMuonExclusive line
    'DiMuonExclusive_Prescale'             : 0.     ,
    'DiMuonExclusive_Postscale'            : 1.     ,
    'DiMuonExclusive_checkPV'              : True   ,
    'DiMuonExclusive_MuonPT'               : 650.   ,  # MeV
    'DiMuonExclusive_MuonP'                : -8000. ,  # MeV, no cut now
    'DiMuonExclusive_MuonTRCHI2DOF'        : 5.     ,
    'DiMuonExclusive_MinMass'              : 2900.  ,
    'DiMuonExclusive_VCHI2PDOF'            : 20.    ,
    'DiMuonExclusive_PT'                   : -1000. ,  # MeV, no cut now
    'DiMuonExclusive_DZ'                   : -1.    ,  # mm, upstream of any PV
    # DiMuonNoPV line
    'DiMuonNoPV_Prescale'                  : 1.     ,
    'DiMuonNoPV_Postscale'                 : 1.     ,
    # DiMuon High Mass line
    'DiMuonHighMass_Prescale'              : 1.     ,
    'DiMuonHighMass_Postscale'             : 1.     ,
    'DiMuonHighMass_checkPV'               : False  ,
    'DiMuonHighMass_MuonPT'                : 650.   ,  # MeV
    'DiMuonHighMass_MuonP'                 : -8000. ,
    'DiMuonHighMass_MuonTRCHI2DOF'         : 5.     ,
    'DiMuonHighMass_MinMass'               : 8500.  ,  # MeV
    'DiMuonHighMass_VCHI2PDOF'             : 20.    ,
    'DiMuonHighMass_PT'                    : -1000. ,  # MeV, no cut now
    # DiMuon High Mass Same Sign line
    'DiMuonHighMassSameSign_Prescale'      : 0.5    ,
    'DiMuonHighMassSameSign_Postscale'     : 1.     ,
    'DiMuonHighMassSameSign_checkPV'       : False  ,
    # DiMuon Low Mass line
    'DiMuonLowMass_Prescale'               : 1.     ,
    'DiMuonLowMass_Postscale'              : 1.     ,
    'DiMuonLowMass_checkPV'                : False  ,
    'DiMuonLowMass_MuonPT'                 : 650.   ,  # MeV
    'DiMuonLowMass_MuonP'                  : -8000. ,  # MeV, no cut now
    'DiMuonLowMass_MuonTRCHI2DOF'          : 5.     ,
    'DiMuonLowMass_MinMass'                : 500.   ,  # MeV
    'DiMuonLowMass_VCHI2PDOF'              : 20.    ,
    'DiMuonLowMass_PT'                     : -1000. ,  # MeV, no cut now
    # Jpsi2MuMu line
    'Jpsi2MuMu_Prescale'                   : 1.     ,
    'Jpsi2MuMu_Postscale'                  : 1.     ,
    'Jpsi2MuMu_checkPV'                    : False  ,
    'Jpsi2MuMu_MuonPT'                     : 650.   ,  # MeV
    'Jpsi2MuMu_MuonP'                      : 8000.  ,  # MeV
    'Jpsi2MuMu_MuonPIDmu'                  : 0.     ,
    'Jpsi2MuMu_MuonTRCHI2DOF'              : 5.     ,
    'Jpsi2MuMu_MinMass'                    : 3010.  ,  # MeV
    'Jpsi2MuMu_MaxMass'                    : 3170.  ,  # MeV
    'Jpsi2MuMu_VCHI2PDOF'                  : 20.    ,
    'Jpsi2MuMu_PT'                         : 3000.  ,  # MeV
    # Psi2MuMu line
    'Psi2MuMu_Prescale'                    : 1.     ,
    'Psi2MuMu_Postscale'                   : 1.     ,
    'Psi2MuMu_checkPV'                     : False  ,
    'Psi2MuMu_ParticleName'                : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
    'Psi2MuMu_MuonPT'                      : 1000.  ,  # MeV
    'Psi2MuMu_MuonP'                       : 8000.  ,  # MeV
    'Psi2MuMu_MuonPIDmu'                   : 0.     ,
    'Psi2MuMu_MuonTRCHI2DOF'               : 5.     ,
    'Psi2MuMu_MassWindow'                  : 120.   ,  # MeV
    'Psi2MuMu_VCHI2PDOF'                   : 20.    ,
    'Psi2MuMu_PT'                          : 3000.  ,  # MeV
    # DiMuonDetached line
    'DiMuonDetached_Prescale'              : 0.     ,
    'DiMuonDetached_Postscale'             : 1.     ,
    'DiMuonDetached_MuonPT'                : 500.   ,  # MeV
    'DiMuonDetached_MuonP'                 : -8000. ,  # MeV, no cut now
    'DiMuonDetached_MuonPIDmu'             : -5.    ,
    'DiMuonDetached_MuonTRCHI2DOF'         : 5.     ,
    'DiMuonDetached_MinMass'               : 2950.  ,
    'DiMuonDetached_VCHI2PDOF'             : 20.    ,
    'DiMuonDetached_PT'                    : -1000. ,  # MeV, no cut now
    'DiMuonDetached_DLS'                   : 5.     ,  # mm, upstream of any PV
    # Jpsi2MuMuDetached line
    'Jpsi2MuMuDetached_Prescale'           : 1.     ,
    'Jpsi2MuMuDetached_Postscale'          : 1.     ,
    'Jpsi2MuMuDetached_MuonPT'             : 500.   ,  # MeV
    'Jpsi2MuMuDetached_MuonP'              : -8000. ,  # MeV, no cut now
    'Jpsi2MuMuDetached_MuonPIDmu'          : -5.    ,
    'Jpsi2MuMuDetached_MuonTRCHI2DOF'      : 5.     ,
    'Jpsi2MuMuDetached_MinMass'            : 2976.916, # MeV
    'Jpsi2MuMuDetached_MaxMass'            : 3216.916, # MeV
    'Jpsi2MuMuDetached_VCHI2PDOF'          : 20.    ,
    'Jpsi2MuMuDetached_PT'                 : -1000. ,  # MeV
    'Jpsi2MuMuDetached_DLS'                : 3.     ,
    # Psi2MuMuDetachedDetached line
    'Psi2MuMuDetached_Prescale'            : 1.     ,
    'Psi2MuMuDetached_Postscale'           : 1.     ,
    'Psi2MuMuDetached_ParticleName'        : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
    'Psi2MuMuDetached_MuonPT'              : 500.   ,  # MeV
    'Psi2MuMuDetached_MuonP'               : -8000. ,  # MeV, no cut now
    'Psi2MuMuDetached_MuonPIDmu'           : -5.    ,
    'Psi2MuMuDetached_MuonTRCHI2DOF'       : 5.     ,
    'Psi2MuMuDetached_MassWindow'          : 120.   ,  # MeV
    'Psi2MuMuDetached_VCHI2PDOF'           : 20.    ,
    'Psi2MuMuDetached_PT'                  : -1000. ,  # MeV, no cut now
    'Psi2MuMuDetached_DLS'                 : 5.
    }
# Cut and prescale configuration for the MicroDST DiMuon stream.
# Same conventions as config_default (MeV / mm, negative values disable
# a cut).  Differences w.r.t. config_default visible below: MicroDST is
# True, most lines require a PV (checkPV True), DiMuonExclusive and
# DiMuonDetached are not prescaled away, HighMass(SameSign) keeps more,
# and a few PT/MinMass thresholds differ.
config_microDST= {
    'MicroDST'                             : True   ,
    # DiMuon line
    'DiMuon_Prescale'                      : 1.     ,
    'DiMuon_Postscale'                     : 1.     ,
    'DiMuon_checkPV'                       : False  ,
    'DiMuon_MuonPT'                        : 650.   ,  # MeV
    'DiMuon_MuonP'                         : -8000. ,  # MeV, no cut now
    'DiMuon_MuonTRCHI2DOF'                 : 5.     ,
    'DiMuon_MinMass'                       : 2900.  ,  # MeV
    'DiMuon_VCHI2PDOF'                     : 20.    ,
    'DiMuon_PT'                            : 3000.  ,  # MeV
    # DiMuon Same Sign line
    'DiMuonSameSign_Prescale'              : 0.05   ,
    'DiMuonSameSign_Postscale'             : 1.     ,
    'DiMuonSameSign_checkPV'               : False  ,
    # DiMuonPrescaled line
    'DiMuonPrescaled_Prescale'             : 0.1    ,
    'DiMuonPrescaled_Postscale'            : 1.     ,
    'DiMuonPrescaled_checkPV'              : False  ,
    'DiMuonPrescaled_MuonPT'               : 650.   ,  # MeV
    'DiMuonPrescaled_MuonP'                : -8000. ,  # MeV, no cut now
    'DiMuonPrescaled_MuonTRCHI2DOF'        : 5.     ,
    'DiMuonPrescaled_MinMass'              : 2900.  ,  # MeV
    'DiMuonPrescaled_VCHI2PDOF'            : 20.    ,
    'DiMuonPrescaled_PT'                   : -1000. ,  # MeV, no cut now
    # DiMuonExclusive line
    'DiMuonExclusive_Prescale'             : 1.     ,
    'DiMuonExclusive_Postscale'            : 1.     ,
    'DiMuonExclusive_checkPV'              : True   ,
    'DiMuonExclusive_MuonPT'               : 650.   ,  # MeV
    'DiMuonExclusive_MuonP'                : -8000. ,  # MeV, no cut now
    'DiMuonExclusive_MuonTRCHI2DOF'        : 5.     ,
    'DiMuonExclusive_MinMass'              : 2900.  ,
    'DiMuonExclusive_VCHI2PDOF'            : 20.    ,
    'DiMuonExclusive_PT'                   : -1000. ,  # MeV, no cut now
    'DiMuonExclusive_DZ'                   : -1.    ,  # mm, upstream of any PV
    # DiMuonNoPV line
    'DiMuonNoPV_Prescale'                  : 1.     ,
    'DiMuonNoPV_Postscale'                 : 1.     ,
    # DiMuon High Mass line
    'DiMuonHighMass_Prescale'              : 1.     ,
    'DiMuonHighMass_Postscale'             : 1.     ,
    'DiMuonHighMass_checkPV'               : True   ,
    'DiMuonHighMass_MuonPT'                : 650.   ,  # MeV
    'DiMuonHighMass_MuonP'                 : -8000. ,
    'DiMuonHighMass_MuonTRCHI2DOF'         : 5.     ,
    'DiMuonHighMass_MinMass'               : 8000.  ,  # MeV
    'DiMuonHighMass_VCHI2PDOF'             : 20.    ,
    'DiMuonHighMass_PT'                    : -1000. ,  # MeV, no cut now
    # DiMuon High Mass Same Sign line
    'DiMuonHighMassSameSign_Prescale'      : 1.     ,
    'DiMuonHighMassSameSign_Postscale'     : 1.     ,
    'DiMuonHighMassSameSign_checkPV'       : True   ,
    # DiMuon Low Mass line
    'DiMuonLowMass_Prescale'               : 1.     ,
    'DiMuonLowMass_Postscale'              : 1.     ,
    'DiMuonLowMass_checkPV'                : True   ,
    'DiMuonLowMass_MuonPT'                 : 650.   ,  # MeV
    'DiMuonLowMass_MuonP'                  : -8000. ,  # MeV, no cut now
    'DiMuonLowMass_MuonTRCHI2DOF'          : 5.     ,
    'DiMuonLowMass_MinMass'                : 500.   ,  # MeV
    'DiMuonLowMass_VCHI2PDOF'              : 20.    ,
    'DiMuonLowMass_PT'                     : -1000. ,  # MeV, no cut now
    # Jpsi2MuMu line
    'Jpsi2MuMu_Prescale'                   : 1.     ,
    'Jpsi2MuMu_Postscale'                  : 1.     ,
    'Jpsi2MuMu_checkPV'                    : True   ,
    'Jpsi2MuMu_MuonPT'                     : 650.   ,  # MeV
    'Jpsi2MuMu_MuonP'                      : -8000. ,  # MeV, no cut now
    'Jpsi2MuMu_MuonPIDmu'                  : 0.     ,
    'Jpsi2MuMu_MuonTRCHI2DOF'              : 5.     ,
    'Jpsi2MuMu_MinMass'                    : 3010.  ,  # MeV
    'Jpsi2MuMu_MaxMass'                    : 3170.  ,  # MeV
    'Jpsi2MuMu_VCHI2PDOF'                  : 20.    ,
    'Jpsi2MuMu_PT'                         : 3000.0 ,  # MeV
    # Psi2MuMu line
    'Psi2MuMu_Prescale'                    : 1.     ,
    'Psi2MuMu_Postscale'                   : 1.     ,
    'Psi2MuMu_checkPV'                     : True   ,
    'Psi2MuMu_ParticleName'                : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
    'Psi2MuMu_MuonPT'                      : 1000.  ,  # MeV
    'Psi2MuMu_MuonP'                       : 8000.  ,  # MeV
    'Psi2MuMu_MuonPIDmu'                   : 0.     ,
    'Psi2MuMu_MuonTRCHI2DOF'               : 5.     ,
    'Psi2MuMu_MassWindow'                  : 120.   ,  # MeV
    'Psi2MuMu_VCHI2PDOF'                   : 20.    ,
    'Psi2MuMu_PT'                          : 2000.  ,  # MeV
    # DiMuonDetached line
    'DiMuonDetached_Prescale'              : 1.     ,
    'DiMuonDetached_Postscale'             : 1.     ,
    'DiMuonDetached_MuonPT'                : 500.   ,  # MeV
    'DiMuonDetached_MuonP'                 : -8000. ,  # MeV, no cut now
    'DiMuonDetached_MuonPIDmu'             : -5.    ,
    'DiMuonDetached_MuonTRCHI2DOF'         : 5.     ,
    'DiMuonDetached_MinMass'               : 2950.  ,
    'DiMuonDetached_VCHI2PDOF'             : 20.    ,
    'DiMuonDetached_PT'                    : -1000. ,  # MeV, no cut now
    'DiMuonDetached_DLS'                   : 5.     ,  # mm, upstream of any PV
    # Jpsi2MuMuDetached line
    'Jpsi2MuMuDetached_Prescale'           : 1.     ,
    'Jpsi2MuMuDetached_Postscale'          : 1.     ,
    'Jpsi2MuMuDetached_MuonPT'             : 500.   ,  # MeV
    'Jpsi2MuMuDetached_MuonP'              : -8000. ,  # MeV, no cut now
    'Jpsi2MuMuDetached_MuonPIDmu'          : -5.    ,
    'Jpsi2MuMuDetached_MuonTRCHI2DOF'      : 5.     ,
    'Jpsi2MuMuDetached_MinMass'            : 2976.916, # MeV
    'Jpsi2MuMuDetached_MaxMass'            : 3216.916, # MeV
    'Jpsi2MuMuDetached_VCHI2PDOF'          : 20.    ,
    'Jpsi2MuMuDetached_PT'                 : -1000. ,  # MeV
    'Jpsi2MuMuDetached_DLS'                : 3.     ,
    # Psi2MuMuDetachedDetached line
    'Psi2MuMuDetached_Prescale'            : 1.     ,
    'Psi2MuMuDetached_Postscale'           : 1.     ,
    'Psi2MuMuDetached_ParticleName'        : "'psi(2S)'", # Particle Name, like "'psi(2S)'"
    'Psi2MuMuDetached_MuonPT'              : 500.   ,  # MeV
    'Psi2MuMuDetached_MuonP'               : -8000. ,  # MeV, no cut now
    'Psi2MuMuDetached_MuonPIDmu'           : -5.    ,
    'Psi2MuMuDetached_MuonTRCHI2DOF'       : 5.     ,
    'Psi2MuMuDetached_MassWindow'          : 120.   ,  # MeV
    'Psi2MuMuDetached_VCHI2PDOF'           : 20.    ,
    'Psi2MuMuDetached_PT'                  : -1000. ,  # MeV, no cut now
    'Psi2MuMuDetached_DLS'                 : 5.
    }
from Gaudi.Configuration import *
from GaudiConfUtils.ConfigurableGenerators import FilterDesktop
from Configurables import LoKi__VoidFilter
from PhysSelPython.Wrappers import Selection, DataOnDemand, EventSelection
from StrippingConf.StrippingLine import StrippingLine
from StrippingUtils.Utils import LineBuilder
class DiMuonConf(LineBuilder):
__configuration_keys__ = (
"MicroDST",
# DiMuon line
'DiMuon_Prescale',
'DiMuon_Postscale',
'DiMuon_checkPV',
'DiMuon_MuonPT',
'DiMuon_MuonP',
'DiMuon_MuonTRCHI2DOF',
'DiMuon_MinMass',
'DiMuon_VCHI2PDOF',
'DiMuon_PT',
# DiMuon Same Sign line
'DiMuonSameSign_Prescale',
'DiMuonSameSign_Postscale',
'DiMuonSameSign_checkPV',
# DiMuonPrescaled line
'DiMuonPrescaled_Prescale',
'DiMuonPrescaled_Postscale',
'DiMuonPrescaled_checkPV',
'DiMuonPrescaled_MuonPT',
'DiMuonPrescaled_MuonP',
'DiMuonPrescaled_MuonTRCHI2DOF',
'DiMuonPrescaled_MinMass',
'DiMuonPrescaled_VCHI2PDOF',
'DiMuonPrescaled_PT',
# DiMuonExclusive line
'DiMuonExclusive_Prescale',
'DiMuonExclusive_Postscale',
'DiMuonExclusive_checkPV',
'DiMuonExclusive_MuonPT',
'DiMuonExclusive_MuonP',
'DiMuonExclusive_MuonTRCHI2DOF',
'DiMuonExclusive_MinMass',
'DiMuonExclusive_VCHI2PDOF',
'DiMuonExclusive_PT',
'DiMuonExclusive_DZ',
# DiMuonNoPV line
'DiMuonNoPV_Prescale',
'DiMuonNoPV_Postscale',
# DiMuon High Mass line
'DiMuonHighMass_Prescale',
'DiMuonHighMass_Postscale',
'DiMuonHighMass_checkPV',
'DiMuonHighMass_MuonPT',
'DiMuonHighMass_MuonP',
'DiMuonHighMass_MuonTRCHI2DOF',
'DiMuonHighMass_MinMass',
'DiMuonHighMass_VCHI2PDOF',
'DiMuonHighMass_PT',
# DiMuonHighMassSameSign line
'DiMuonHighMassSameSign_Prescale',
'DiMuonHighMassSameSign_Postscale',
'DiMuonHighMassSameSign_checkPV',
# DiMuon Low Mass line
'DiMuonLowMass_Prescale',
'DiMuonLowMass_Postscale',
'DiMuonLowMass_checkPV',
'DiMuonLowMass_MuonPT',
'DiMuonLowMass_MuonP',
'DiMuonLowMass_MuonTRCHI2DOF',
'DiMuonLowMass_MinMass',
'DiMuonLowMass_VCHI2PDOF',
'DiMuonLowMass_PT',
# Tight Jpsi line
'Jpsi2MuMu_Prescale',
'Jpsi2MuMu_Postscale',
'Jpsi2MuMu_checkPV',
'Jpsi2MuMu_MuonPT',
'Jpsi2MuMu_MuonP',
'Jpsi2MuMu_MuonPIDmu',
'Jpsi2MuMu_MuonTRCHI2DOF',
'Jpsi2MuMu_MinMass',
'Jpsi2MuMu_MaxMass',
'Jpsi2MuMu_VCHI2PDOF',
'Jpsi2MuMu_PT',
# Tight Psi(2S) line
'Psi2MuMu_Prescale',
'Psi2MuMu_Postscale',
'Psi2MuMu_checkPV',
'Psi2MuMu_ParticleName',
'Psi2MuMu_MuonPT',
'Psi2MuMu_MuonP',
'Psi2MuMu_MuonPIDmu',
'Psi2MuMu_MuonTRCHI2DOF',
'Psi2MuMu_MassWindow',
'Psi2MuMu_VCHI2PDOF',
'Psi2MuMu_PT',
# DiMuonDetached line
'DiMuonDetached_Prescale',
'DiMuonDetached_Postscale',
'DiMuonDetached_MuonPT',
'DiMuonDetached_MuonP',
'DiMuonDetached_MuonPIDmu',
'DiMuonDetached_MuonTRCHI2DOF',
'DiMuonDetached_MinMass',
'DiMuonDetached_VCHI2PDOF',
'DiMuonDetached_PT',
'DiMuonDetached_DLS',
# Jpsi2MuMuDetached line
'Jpsi2MuMuDetached_Prescale',
'Jpsi2MuMuDetached_Postscale',
'Jpsi2MuMuDetached_MuonPT',
'Jpsi2MuMuDetached_MuonP',
'Jpsi2MuMuDetached_MuonPIDmu',
'Jpsi2MuMuDetached_MuonTRCHI2DOF',
'Jpsi2MuMuDetached_MinMass',
'Jpsi2MuMuDetached_MaxMass',
'Jpsi2MuMuDetached_VCHI2PDOF',
'Jpsi2MuMuDetached_PT',
'Jpsi2MuMuDetached_DLS',
# Psi2MuMuDetached line
'Psi2MuMuDetached_Prescale',
'Psi2MuMuDetached_Postscale',
'Psi2MuMuDetached_ParticleName',
'Psi2MuMuDetached_MuonPT',
'Psi2MuMuDetached_MuonP',
'Psi2MuMuDetached_MuonPIDmu',
'Psi2MuMuDetached_MuonTRCHI2DOF',
'Psi2MuMuDetached_MassWindow',
'Psi2MuMuDetached_VCHI2PDOF',
'Psi2MuMuDetached_PT',
'Psi2MuMuDetached_DLS'
)
def __init__(self, name, config):
LineBuilder.__init__(self, name, config)
# if name not set outside, set it to empty
if name == None:
name = ""
"""
DiMuon line
"""
self.SelDiMuon = filterDiMuon( name + 'DiMuon',
MuonPT = config['DiMuon_MuonPT'],
MuonP = config['DiMuon_MuonP'],
MuonTRCHI2DOF = config['DiMuon_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuon_MinMass'],
MuMuVCHI2PDOF = config['DiMuon_VCHI2PDOF'],
MuMuPT = config['DiMuon_PT']
)
self.DiMuonLine = StrippingLine( name + 'DiMuonInc' + 'Line',
prescale = config['DiMuon_Prescale'],
postscale = config['DiMuon_Postscale'],
checkPV = config['DiMuon_checkPV'],
selection = self.SelDiMuon
)
"""
DiMuon same sign line
"""
self.SelDiMuonSameSign = filterDiMuonSameSign( name + 'DiMuonSameSign',
MuonPT = config['DiMuon_MuonPT'],
MuonP = config['DiMuon_MuonP'],
MuonTRCHI2DOF = config['DiMuon_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuon_MinMass'],
MuMuVCHI2PDOF = config['DiMuon_VCHI2PDOF'],
MuMuPT = config['DiMuon_PT']
)
self.DiMuonSameSignLine = StrippingLine( name + 'DiMuonSameSign' + 'Line',
prescale = config['DiMuonSameSign_Prescale'],
postscale = config['DiMuonSameSign_Postscale'],
checkPV = config['DiMuonSameSign_checkPV'],
selection = self.SelDiMuonSameSign
)
"""
DiMuonPrescaled line
"""
self.SelDiMuonPrescaled = filterDiMuon( name + 'DiMuonPrescaled',
MuonPT = config['DiMuonPrescaled_MuonPT'],
MuonP = config['DiMuonPrescaled_MuonP'],
MuonTRCHI2DOF = config['DiMuonPrescaled_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonPrescaled_MinMass'],
MuMuVCHI2PDOF = config['DiMuonPrescaled_VCHI2PDOF'],
MuMuPT = config['DiMuonPrescaled_PT']
)
self.DiMuonPrescaledLine = StrippingLine( name + 'DiMuonPrescaled' + 'Line',
prescale = config['DiMuonPrescaled_Prescale'],
postscale = config['DiMuonPrescaled_Postscale'],
checkPV = config['DiMuonPrescaled_checkPV'],
selection = self.SelDiMuonPrescaled
)
"""
DiMuonExclusiveline
"""
self.SelDiMuonExclusive = filterDiMuonAndDZ( name + 'DiMuonExclusive',
MuonPT = config['DiMuonExclusive_MuonPT'],
MuonP = config['DiMuonExclusive_MuonP'],
MuonTRCHI2DOF = config['DiMuonExclusive_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonExclusive_MinMass'],
MuMuVCHI2PDOF = config['DiMuonExclusive_VCHI2PDOF'],
MuMuPT = config['DiMuonExclusive_PT'],
MuMuDZ = config['DiMuonExclusive_DZ']
)
self.DiMuonExclusiveLine = StrippingLine( name + 'DiMuonExclusive' + 'Line',
prescale = config['DiMuonExclusive_Prescale'],
postscale = config['DiMuonExclusive_Postscale'],
checkPV = config['DiMuonExclusive_checkPV'],
selection = self.SelDiMuonExclusive
)
"""
DiMuonNoPVline
"""
self.SelDiMuonNoPV = filterDiMuon( name + 'DiMuonNoPV',
MuonPT = config['DiMuonExclusive_MuonPT'],
MuonP = config['DiMuonExclusive_MuonP'],
MuonTRCHI2DOF = config['DiMuonExclusive_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonExclusive_MinMass'],
MuMuVCHI2PDOF = config['DiMuonExclusive_VCHI2PDOF'],
MuMuPT = config['DiMuonExclusive_PT']
)
self.DiMuonNoPVLine = StrippingLine( name + 'DiMuonNoPV' + 'Line',
prescale = config['DiMuonNoPV_Prescale'],
postscale = config['DiMuonNoPV_Postscale'],
checkPV = False,
FILTER = { 'Code' : "CONTAINS('Rec/Vertex/Primary')<0.5" ,
'Preambulo' : [ 'from LoKiTracks.decorators import *' ,
'from LoKiCore.functions import *' ]
},
selection = self.SelDiMuonExclusive
)
"""
DiMuon High Mass line
"""
self.SelDiMuonHighMass = filterDiMuon( name + 'DiMuonHighMass',
MuonPT = config['DiMuonHighMass_MuonPT'],
MuonP = config['DiMuonHighMass_MuonP'],
MuonTRCHI2DOF = config['DiMuonHighMass_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonHighMass_MinMass'],
MuMuVCHI2PDOF = config['DiMuonHighMass_VCHI2PDOF'],
MuMuPT = config['DiMuonHighMass_PT']
)
self.DiMuonHighMassLine = StrippingLine( name + 'DiMuonHighMass' + 'Line',
prescale = config['DiMuonHighMass_Prescale'],
postscale = config['DiMuonHighMass_Postscale'],
checkPV = config['DiMuonHighMass_checkPV'],
selection = self.SelDiMuonHighMass
)
"""
DiMuon High Mass Same Sign line
"""
self.SelDiMuonHighMassSameSign = filterDiMuonSameSign( name + 'DiMuonHighMassSameSign',
MuonPT = config['DiMuonHighMass_MuonPT'],
MuonP = config['DiMuonHighMass_MuonP'],
MuonTRCHI2DOF = config['DiMuonHighMass_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonHighMass_MinMass'],
MuMuVCHI2PDOF = config['DiMuonHighMass_VCHI2PDOF'],
MuMuPT = config['DiMuonHighMass_PT']
)
self.DiMuonHighMassSameSignLine = StrippingLine( name + 'DiMuonHighMassSameSign' + 'Line',
prescale = config['DiMuonHighMassSameSign_Prescale'],
postscale = config['DiMuonHighMassSameSign_Postscale'],
checkPV = config['DiMuonHighMassSameSign_checkPV'],
selection = self.SelDiMuonHighMassSameSign
)
"""
DiMuon Low Mass line
"""
self.SelDiMuonLowMass = filterDiMuon( name + 'DiMuonLowMass',
MuonPT = config['DiMuonLowMass_MuonPT'],
MuonP = config['DiMuonLowMass_MuonP'],
MuonTRCHI2DOF = config['DiMuonLowMass_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonLowMass_MinMass'],
MuMuVCHI2PDOF = config['DiMuonLowMass_VCHI2PDOF'],
MuMuPT = config['DiMuonLowMass_PT']
)
self.DiMuonLowMassLine = StrippingLine( name + 'DiMuonLowMass' + 'Line',
HLT = "HLT_PASS('Hlt2DiMuonLowMassDecision')",
prescale = config['DiMuonLowMass_Prescale'],
postscale = config['DiMuonLowMass_Postscale'],
checkPV = config['DiMuonLowMass_checkPV'],
selection = self.SelDiMuonLowMass
)
"""
Jpsi-> mumu tight line
"""
self.SelJpsi2MuMu = filterJpsi2MuMu( name + 'Jpsi2MuMu',
MuonPT = config['Jpsi2MuMu_MuonPT'],
MuonP = config['Jpsi2MuMu_MuonP'],
MuonPIDmu = config['Jpsi2MuMu_MuonPIDmu'],
MuonTRCHI2DOF = config['Jpsi2MuMu_MuonTRCHI2DOF'],
MuMuMinMass = config['Jpsi2MuMu_MinMass'],
MuMuMaxMass = config['Jpsi2MuMu_MaxMass'],
MuMuVCHI2PDOF = config['Jpsi2MuMu_VCHI2PDOF'],
MuMuPT = config['Jpsi2MuMu_PT']
)
self.Jpsi2MuMuLine = StrippingLine( name + 'Jpsi2MuMu' + 'Line',
prescale = config['Jpsi2MuMu_Prescale'],
postscale = config['Jpsi2MuMu_Postscale'],
checkPV = config['Jpsi2MuMu_checkPV'],
selection = self.SelJpsi2MuMu
)
"""
Psi(2S)->mumu tight line
"""
self.SelPsi2MuMu = filterSignal( name + 'Psi2MuMu',
ParticleName = config['Psi2MuMu_ParticleName'],
MuonPT = config['Psi2MuMu_MuonPT'],
MuonP = config['Psi2MuMu_MuonP'],
MuonPIDmu = config['Psi2MuMu_MuonPIDmu'],
MuonTRCHI2DOF = config['Psi2MuMu_MuonTRCHI2DOF'],
MuMuMassWindow= config['Psi2MuMu_MassWindow'],
MuMuVCHI2PDOF = config['Psi2MuMu_VCHI2PDOF'],
MuMuPT = config['Psi2MuMu_PT']
)
self.Psi2MuMuLine = StrippingLine( name + 'Psi2MuMu' + 'Line',
prescale = config['Psi2MuMu_Prescale'],
postscale = config['Psi2MuMu_Postscale'],
checkPV = config['Psi2MuMu_checkPV'],
selection = self.SelPsi2MuMu
)
"""
DiMuonDetachedline
"""
self.SelDiMuonDetached = filterDiMuonDetached( name + 'DiMuonDetached',
MuonPT = config['DiMuonDetached_MuonPT'],
MuonP = config['DiMuonDetached_MuonP'],
MuonPIDmu = config['DiMuonDetached_MuonPIDmu'],
MuonTRCHI2DOF = config['DiMuonDetached_MuonTRCHI2DOF'],
MuMuMinMass = config['DiMuonDetached_MinMass'],
MuMuVCHI2PDOF = config['DiMuonDetached_VCHI2PDOF'],
MuMuPT = config['DiMuonDetached_PT'],
MuMuDLS = config['DiMuonDetached_DLS']
)
self.DiMuonDetachedLine = StrippingLine( name + 'DiMuonDetached' + 'Line',
prescale = config['DiMuonDetached_Prescale'],
postscale = config['DiMuonDetached_Postscale'],
checkPV = True,
selection = self.SelDiMuonDetached
)
"""
Jpsi2MuMuDetached tight line
"""
self.SelJpsi2MuMuDetached = filterJpsi2MuMuDetached( name + 'Jpsi2MuMuDetached',
MuonPT = config['Jpsi2MuMuDetached_MuonPT'],
MuonP = config['Jpsi2MuMuDetached_MuonP'],
MuonPIDmu = config['Jpsi2MuMuDetached_MuonPIDmu'],
MuonTRCHI2DOF = config['Jpsi2MuMuDetached_MuonTRCHI2DOF'],
MuMuMinMass = config['Jpsi2MuMuDetached_MinMass'],
MuMuMaxMass = config['Jpsi2MuMuDetached_MaxMass'],
MuMuVCHI2PDOF = config['Jpsi2MuMuDetached_VCHI2PDOF'],
MuMuPT = config['Jpsi2MuMuDetached_PT'],
MuMuDLS = config['Jpsi2MuMuDetached_DLS']
)
self.Jpsi2MuMuDetachedLine = StrippingLine( name + 'Jpsi2MuMuDetached' + 'Line',
prescale = config['Jpsi2MuMuDetached_Prescale'],
postscale = config['Jpsi2MuMuDetached_Postscale'],
checkPV = True,
selection = self.SelJpsi2MuMuDetached
)
"""
Psi2MuMuDetached line
"""
self.SelPsi2MuMuDetached = filterSignalDetached( name + 'Psi2MuMuDetached',
ParticleName = config['Psi2MuMuDetached_ParticleName'],
MuonPT = config['Psi2MuMuDetached_MuonPT'],
MuonP = config['Psi2MuMuDetached_MuonP'],
MuonPIDmu = config['Psi2MuMuDetached_MuonPIDmu'],
MuonTRCHI2DOF = config['Psi2MuMuDetached_MuonTRCHI2DOF'],
MuMuMassWindow= config['Psi2MuMuDetached_MassWindow'],
MuMuVCHI2PDOF = config['Psi2MuMuDetached_VCHI2PDOF'],
MuMuPT = config['Psi2MuMuDetached_PT'],
MuMuDLS = config['Psi2MuMuDetached_DLS']
)
self.Psi2MuMuDetachedLine = StrippingLine( name + 'Psi2MuMuDetached' + 'Line',
prescale = config['Psi2MuMuDetached_Prescale'],
postscale = config['Psi2MuMuDetached_Postscale'],
checkPV = True,
selection = self.SelPsi2MuMuDetached
)
if config['MicroDST']:
self.registerLine( self.DiMuonLine )
self.registerLine( self.DiMuonSameSignLine )
self.registerLine( self.DiMuonPrescaledLine )
else:
self.registerLine( self.DiMuonExclusiveLine )
self.registerLine( self.DiMuonNoPVLine )
self.registerLine( self.DiMuonHighMassLine )
self.registerLine( self.DiMuonHighMassSameSignLine )
self.registerLine( self.DiMuonLowMassLine )
self.registerLine( self.Jpsi2MuMuLine )
self.registerLine( self.Psi2MuMuLine )
self.registerLine( self.DiMuonDetachedLine )
self.registerLine( self.Jpsi2MuMuDetachedLine )
self.registerLine( self.Psi2MuMuDetachedLine )
def filterDiMuon( name,
                  MuonPT,
                  MuonP,
                  MuonTRCHI2DOF,
                  MuMuMinMass,
                  MuMuVCHI2PDOF,
                  MuMuPT
                  ):
    """Select StdLooseDiMuon candidates passing per-muon and dimuon cuts.

    Cut values are interpolated into LoKi functor strings via ``locals()``.
    Returns a Selection named ``<name>_SelMuMu``.
    """
    dimuon_input = DataOnDemand(Location='Phys/StdLooseDiMuon/Particles')
    # Cuts on the daughter muons (PT, momentum, track chi2/dof).
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # Cuts on the dimuon combination (mass, vertex quality, PT).
    pair_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s)" % locals()
    dimuon_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelMuMu",
                     Algorithm=dimuon_filter,
                     RequiredSelections=[dimuon_input])
def filterDiMuonSameSign( name,
                          MuonPT,
                          MuonP,
                          MuonTRCHI2DOF,
                          MuMuMinMass,
                          MuMuVCHI2PDOF,
                          MuMuPT
                          ):
    """Same cuts as filterDiMuon, but on the same-sign dimuon container.

    Returns a Selection named ``<name>_SelMuMuSS``.
    """
    samesign_input = DataOnDemand(Location='Phys/StdLooseDiMuonSameSign/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    pair_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV)" % locals()
    samesign_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelMuMuSS",
                     Algorithm=samesign_filter,
                     RequiredSelections=[samesign_input])
def filterJpsi2MuMu( name,
                     MuonPT,
                     MuonP,
                     MuonPIDmu,
                     MuonTRCHI2DOF,
                     MuMuMinMass,
                     MuMuMaxMass,
                     MuMuVCHI2PDOF,
                     MuMuPT
                     ):
    """Tight J/psi -> mu mu selection: muon PID plus a two-sided mass window.

    Returns a Selection named ``<name>_SelJpsi2MuMu``.
    """
    jpsi_input = DataOnDemand(Location='Phys/StdLooseJpsi2MuMu/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    pair_cut = "(MM > %(MuMuMinMass)s) & (MM < %(MuMuMaxMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s)" % locals()
    jpsi_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelJpsi2MuMu",
                     Algorithm=jpsi_filter,
                     RequiredSelections=[jpsi_input])
def filterSignal( name,
                  ParticleName,
                  MuonPT,
                  MuonP,
                  MuonPIDmu,
                  MuonTRCHI2DOF,
                  MuMuMassWindow,
                  MuMuVCHI2PDOF,
                  MuMuPT
                  ):
    """Select dimuons within a mass window around `ParticleName`.

    Returns a Selection named ``<name>_SelP2MuMu``.
    """
    dimuon_input = DataOnDemand(Location='Phys/StdLooseDiMuon/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    # ADMASS = |M - M(ParticleName)|, i.e. a symmetric mass window.
    pair_cut = "(ADMASS(%(ParticleName)s) < %(MuMuMassWindow)s *MeV) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV)" % locals()
    signal_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelP2MuMu",
                     Algorithm=signal_filter,
                     RequiredSelections=[dimuon_input])
def filterDZ( name,
              DZAnyPV,
              MySelection ):
    """Event-level filter on the minimum z distance between the candidates in
    `MySelection` and any primary vertex.

    Returns an EventSelection wrapping a LoKi void filter.
    """
    preambulo = [
        "from LoKiPhys.decorators import *",
        "minMyZ = SOURCE('%s') >> min_value( VFASPF(VZ) )" % (MySelection.outputLocation()),
        "minPVZ = VSOURCE('Rec/Vertex/Primary') >> min_value(VZ) ",
    ]
    dz_filter = LoKi__VoidFilter(
        name + 'filterDZ',
        Code=" ( minMyZ - minPVZ ) < %(DZAnyPV)s*mm " % locals(),
        Preambulo=preambulo,
    )
    return EventSelection(dz_filter)
def filterDiMuonAndDZ( name,
                       MuonPT,
                       MuonP,
                       MuonTRCHI2DOF,
                       MuMuMinMass,
                       MuMuVCHI2PDOF,
                       MuMuPT,
                       MuMuDZ
                       ):
    """Like filterDiMuon, with an additional BPVVDZ (z displacement) cut.

    Returns a Selection named ``<name>_SelMuMu``.
    """
    dimuon_input = DataOnDemand(Location='Phys/StdLooseDiMuon/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    pair_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVVDZ < %(MuMuDZ)s*mm)" % locals()
    dimuon_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelMuMu",
                     Algorithm=dimuon_filter,
                     RequiredSelections=[dimuon_input])
def filterDiMuonDetached( name,
                          MuonPT,
                          MuonP,
                          MuonPIDmu,
                          MuonTRCHI2DOF,
                          MuMuMinMass,
                          MuMuVCHI2PDOF,
                          MuMuPT,
                          MuMuDLS
                          ):
    """Dimuon selection with muon PID and a decay-length-significance cut.

    Returns a Selection named ``<name>_SelMuMu``.
    """
    dimuon_input = DataOnDemand(Location='Phys/StdLooseDiMuon/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s)" % locals()
    # BPVDLS = decay length significance w.r.t. the best primary vertex.
    pair_cut = "(MM > %(MuMuMinMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVDLS>%(MuMuDLS)s)" % locals()
    dimuon_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelMuMu",
                     Algorithm=dimuon_filter,
                     RequiredSelections=[dimuon_input])
def filterJpsi2MuMuDetached( name,
                             MuonPT,
                             MuonP,
                             MuonPIDmu,
                             MuonTRCHI2DOF,
                             MuMuMinMass,
                             MuMuMaxMass,
                             MuMuVCHI2PDOF,
                             MuMuPT,
                             MuMuDLS
                             ):
    """Detached J/psi -> mu mu selection: two-sided mass window plus BPVDLS.

    Returns a Selection named ``<name>_SelJpsi2MuMu``.
    """
    jpsi_input = DataOnDemand(Location='Phys/StdLooseJpsi2MuMu/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s)" % locals()
    pair_cut = "(MM > %(MuMuMinMass)s) & (MM < %(MuMuMaxMass)s) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s) & (BPVDLS>%(MuMuDLS)s)" % locals()
    jpsi_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelJpsi2MuMu",
                     Algorithm=jpsi_filter,
                     RequiredSelections=[jpsi_input])
def filterSignalDetached( name,
                          ParticleName,
                          MuonPT,
                          MuonP,
                          MuonPIDmu,
                          MuonTRCHI2DOF,
                          MuMuMassWindow,
                          MuMuVCHI2PDOF,
                          MuMuPT,
                          MuMuDLS
                          ):
    """Mass-window selection around `ParticleName` with a detachment cut.

    Returns a Selection named ``<name>_SelP2MuMu``.
    """
    dimuon_input = DataOnDemand(Location='Phys/StdLooseDiMuon/Particles')
    muon_cut = "(MINTREE('mu+'==ABSID,PT) > %(MuonPT)s *MeV) & (MINTREE('mu+'==ABSID,P) > %(MuonP)s *MeV) & (MINTREE('mu+'==ABSID,PIDmu) > %(MuonPIDmu)s) & (MAXTREE('mu+'==ABSID,TRCHI2DOF) < %(MuonTRCHI2DOF)s)" % locals()
    pair_cut = "(ADMASS(%(ParticleName)s) < %(MuMuMassWindow)s *MeV) & (VFASPF(VCHI2PDOF)< %(MuMuVCHI2PDOF)s) & (PT > %(MuMuPT)s *MeV) & (BPVDLS>%(MuMuDLS)s)" % locals()
    signal_filter = FilterDesktop(Code=" & ".join((muon_cut, pair_cut)))
    return Selection(name + "_SelP2MuMu",
                     Algorithm=signal_filter,
                     RequiredSelections=[dimuon_input])
| [
"[email protected]"
] | |
6a5307ad7db7ca33697b63b6436b59f2d9a19557 | 847273de4b1d814fab8b19dc651c651c2d342ede | /.history/solve_20180621175952.py | 3582f849f7b735ae37b11bf925233f5c6574c087 | [] | no_license | Los4U/sudoku_in_python | 0ba55850afcffeac4170321651620f3c89448b45 | 7d470604962a43da3fc3e5edce6f718076197d32 | refs/heads/master | 2020-03-22T08:10:13.939424 | 2018-07-04T17:21:13 | 2018-07-04T17:21:13 | 139,749,483 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,846 | py |
def findNextCellToFill(grid, i, j):
    """Find the next empty cell (value 0) in a 9x9 grid.

    First scans the sub-rectangle of cells with row >= i and column >= j,
    then falls back to a full scan of the board. Returns (row, col), or
    (-1, -1) when no empty cell exists.
    """
    rect_scan = ((x, y) for x in range(i, 9) for y in range(j, 9))
    full_scan = ((x, y) for x in range(0, 9) for y in range(0, 9))
    for scan in (rect_scan, full_scan):
        for x, y in scan:
            if grid[x][y] == 0:
                return x, y
    return -1, -1
def isValid(grid, i, j, e):
    """Return True if value e may be placed at (i, j).

    e must not already appear in row i, in column j, or in the 3x3 box
    containing (i, j).
    """
    if any(grid[i][col] == e for col in range(9)):
        return False
    if any(grid[row][j] == e for row in range(9)):
        return False
    # Top-left corner of the 3x3 box containing (i, j).
    box_row, box_col = 3 * (i // 3), 3 * (j // 3)
    for row in range(box_row, box_row + 3):
        for col in range(box_col, box_col + 3):
            if grid[row][col] == e:
                return False
    return True
def solveSudoku(grid, i=0, j=0):
    """Solve the sudoku in place by backtracking; return True on success."""
    i, j = findNextCellToFill(grid, i, j)
    if i == -1:
        return True  # board is complete
    for candidate in range(1, 10):
        if not isValid(grid, i, j, candidate):
            continue
        grid[i][j] = candidate
        if solveSudoku(grid, i, j):
            return True
        grid[i][j] = 0  # undo the current cell for backtracking
    return False
# NOTE(review): module-level 'input' shadows the builtin input(); 0 marks an empty cell.
input = [[5,1,7,6,0,0,0,3,4],[2,8,9,0,0,4,0,0,0],[3,4,6,2,0,5,0,9,0],[6,0,2,0,0,0,0,1,0],[0,3,8,0,0,6,0,4,7],[0,0,0,0,0,0,0,0,0],[0,9,0,0,0,0,0,7,8],[7,0,3,4,0,0,5,6,0],[0,0,0,0,0,0,0,0,0]]
solveSudoku(input) | [
"[email protected]"
] | |
a7397e10786125cdc8ee81286b7a97fdbc6f1f78 | 38b8bceafb4d80afc7c77196eb9ee99694191bcf | /wxpython/grid2.py | e9749835196d535abce07a36ed5223c8b385ea9f | [] | no_license | tangc1986/PythonStudy | f6c5b384874e82fbf0b5f51cfb7a7a89a48ec0ff | 1ed1956758e971647426e7096ac2e8cbcca585b4 | refs/heads/master | 2021-01-23T20:39:23.930754 | 2017-10-08T07:40:32 | 2017-10-08T07:42:38 | 42,122,267 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | # -*- coding: UTF-8 -*-
__author__ = 'tangchao'
import wx
import wx.grid
class TestFrame(wx.Frame):
    """Demo frame showing custom row/column header labels on a 5x5 wx grid."""

    rowLabels = ["uno", "dos", "tres", "quatro", "cinco"]
    colLabels = ["homer", "marge", "bart", "lisa", "mnaggie"]

    def __init__(self):
        wx.Frame.__init__(self, None, title="Grid Headers",
                          size=(500, 200))
        grid = wx.grid.Grid(self)
        grid.CreateGrid(5, 5)
        for k in range(5):
            # Label row k and column k, then fill row k's cells with the
            # (row label, column label) pair so headers are easy to verify.
            grid.SetRowLabelValue(k, self.rowLabels[k])
            grid.SetColLabelValue(k, self.colLabels[k])
            for c in range(5):
                grid.SetCellValue(k, c,
                    "(%s, %s)" % (self.rowLabels[k], self.colLabels[c]))
# Standard wxPython bootstrap: create the app, show the frame, run the event loop.
app = wx.PySimpleApp()
frame = TestFrame()
frame.Show()
app.MainLoop()
| [
"[email protected]"
] | |
ed979ab630c289963e03474eb637faa48e40ab30 | ccdd61e4813c8a0a9f28d23c2ee5b02790cc7456 | /find_kallsyms.py | 6abcbcbc6093bfc8366c561666dd2fec2826f1f2 | [] | no_license | freemanZYQ/ida-kallsyms | 65084ffa65c3d8456fc227b7391ed0c87fbdbd50 | 4f0beb659a3b65e4b1c5056ad9ebba6ac4572b21 | refs/heads/master | 2020-08-15T07:51:23.810578 | 2019-10-10T18:01:48 | 2019-10-10T18:04:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,930 | py | #!/usr/bin/env python3
import logging
import struct
import sys
def try_parse_token_index(rodata, endianness, offset):
    """Try to decode kallsyms_token_index (256 uint16 offsets) at `offset`.

    The array must start with 0 and be strictly increasing. Returns the
    list of 256 indices, or None if the data at `offset` does not qualify.
    `endianness` is a struct format prefix ('<' or '>').
    """
    index_fmt = endianness + 'H'
    index, = struct.unpack(index_fmt, rodata[offset:offset + 2])
    if index != 0:
        # The first token index must be 0. This used to be an assert, but
        # asserts disappear under `python -O`; rejecting the candidate is
        # also what every caller expects on malformed data.
        return None
    indices = [index]
    for _ in range(255):
        offset += 2
        index, = struct.unpack(index_fmt, rodata[offset:offset + 2])
        if index <= indices[-1]:
            return None  # Token indices must be monotonically increasing.
        indices.append(index)
    return indices
def find_token_indices(rodata, endianness):
    """Yield (offset, indices) for every candidate kallsyms_token_index.

    kallsyms_token_index is an array of 256 monotonically increasing
    uint16s starting with 0, and it directly follows kallsyms_token_table
    (a run of null-terminated strings) -- so every candidate is preceded
    by a NUL and begins with the two zero bytes of its first element,
    i.e. three consecutive zero bytes in total.
    """
    search_from = 0
    while True:
        hit = rodata.find(b'\x00\x00\x00', search_from)
        if hit == -1:
            return
        candidate = hit + 1
        parsed = try_parse_token_index(rodata, endianness, candidate)
        if parsed is not None:
            yield candidate, parsed
        # Resume the search one byte past the previous match.
        search_from = candidate
def try_parse_token_table(rodata, token_index, start_offset, end_offset):
    """Decode the 256 token strings of a candidate kallsyms_token_table.

    Each token starts at start_offset + token_index[i] and runs up to the
    next token (or `end_offset` for the last one). Returns the list of
    tokens with their NUL terminators stripped, or None if any token is
    not a printable null-terminated string.
    """
    tokens = []
    for i, rel in enumerate(token_index):
        tok_start = start_offset + rel
        tok_end = end_offset if i == 255 else start_offset + token_index[i + 1]
        raw = rodata[tok_start:tok_end]
        # A valid token has no embedded NUL and ends with exactly one NUL.
        if b'\x00' in raw[:-1]:
            return None
        if raw[-1] != 0:
            return None
        tokens.append(raw[:-1])
    return tokens
def find_token_tables(rodata, token_index, token_index_offset):
    """Yield (offset, tokens) for candidate kallsyms_token_table arrays
    that end just before kallsyms_token_index at `token_index_offset`.

    kallsyms_token_table is 256 null-terminated strings, so the final
    token starts right after some earlier NUL terminator; each candidate
    start position is tried in turn, walking backwards through the data.
    """
    last_token_offset = token_index_offset
    while True:
        table_end = last_token_offset
        # Candidate position of the last token: just past the previous NUL.
        last_token_offset = rodata.rfind(b'\x00', 0, last_token_offset - 1) + 1
        if last_token_offset == 0:
            return
        # token_index[-1] is the last token's offset from the table start.
        table_start = last_token_offset - token_index[-1]
        if table_start < 0:
            continue
        tokens = try_parse_token_table(rodata, token_index, table_start, table_end)
        if tokens is not None:
            yield table_start, tokens
def find_markers(rodata, endianness, token_table_offset):
    """Scan backwards from kallsyms_token_table for kallsyms_markers.

    kallsyms_markers is an array of monotonically increasing offsets into
    the compressed name stream (one per 256 symbols), starting with 0.
    Both 4-byte (kernel >= 4.20) and 8-byte element sizes are attempted.
    Yields (markers_offset, markers) for every plausible decoding.
    """
    # In 4.20 the size of markers was reduced to 4 bytes.
    for marker_fmt, marker_size in (
            (endianness + 'I', 4),
            (endianness + 'Q', 8),
    ):
        first = True
        marker_offset = token_table_offset - marker_size
        markers = []
        while True:
            # kallsyms_markers is an array of monotonically increasing offsets,
            # which starts with 0. It is aligned on an 8-byte boundary, so if
            # the element size is 4 bytes and their number is odd, it is zero-
            # padded at the end.
            marker, = struct.unpack(
                marker_fmt, rodata[marker_offset:marker_offset + marker_size])
            if first:
                first = False
                if marker == 0 and marker_size == 4:
                    # Skip padding.
                    marker_offset -= marker_size
                    continue
            elif len(markers) > 0 and marker >= markers[-1]:
                # The array is not monotonically increasing.
                # (We are reading the array back-to-front here.)
                return
            markers.append(marker)
            if marker == 0:
                # We found the first element.
                break
            marker_offset -= marker_size
        if marker_size == 4 and len(markers) == 2:
            # Marker size must be 8 bytes, and we must be taking the upper
            # part, which is always 0, for the first marker.
            continue
        markers.reverse()
        yield marker_offset, markers
def is_name_ok(rodata, token_lengths, offset):
    """Check whether `offset` starts a plausible kallsyms_names entry.

    An entry is <token count byte><token ids...>. The expanded name must
    be between 1 and 127 characters. The caller guarantees the entry does
    not span past the end of kallsyms_names, so token ids are read without
    further bounds checks.
    """
    n_tokens = rodata[offset]
    if not 0 < n_tokens < 128:
        # Names are non-empty and at most 127 characters long, and each
        # token contributes at least one character.
        return False
    expanded_length = 0
    for k in range(n_tokens):
        expanded_length += token_lengths[rodata[offset + 1 + k]]
        if expanded_length >= 128:
            return False
    return True
def extract_name(rodata, token_table, offset):
    """Expand the (already validated) name entry at `offset` into bytes."""
    n_tokens = rodata[offset]
    token_ids = rodata[offset + 1:offset + 1 + n_tokens]
    return b''.join(token_table[tok_id] for tok_id in token_ids)
def find_num_syms(rodata, endianness, token_table, markers_offset):
    """Locate kallsyms_num_syms and decode kallsyms_names.

    Works backwards from `markers_offset` (the start of kallsyms_markers),
    counting how many valid length-prefixed name entries follow each
    candidate offset, until an offset is found that is directly preceded
    by a 32-bit kallsyms_num_syms value consistent with that count.
    Yields (num_syms_offset, decoded_names) on success.
    """
    # kallsyms_names is a sequence of length-prefixed entries ending with
    # padding to an 8-byte boundary, followed by kallsyms_markers.
    # Unfortunately, some guesswork is required to locate the start of
    # kallsyms_names given that we know the start of kallsyms_markers.
    num_syms_fmt = endianness + 'I'
    token_lengths = [len(token) for token in token_table]
    # Indexed by (markers_offset - offset - 1). Each element is a number of
    # name entries that follow the respective offset, or None if that offset is
    # not a start of a valid name entry.
    name_counts = []
    # Whether offset still points to one of the trailing zeroes.
    trailing_zeroes = True
    offset = markers_offset
    while offset >= 9:
        offset -= 1
        if rodata[offset] != 0:
            # Trailing zeroes have ended.
            trailing_zeroes = False
        next_name_offset = offset + rodata[offset] + 1
        if next_name_offset >= markers_offset:
            # The current name entry spans past the end of kallsyms_names. This
            # is allowed if we are still looking at trailing zeroes.
            name_counts.append(0 if trailing_zeroes else None)
            continue
        next_name_count = name_counts[markers_offset - next_name_offset - 1]
        if next_name_count is None:
            # The next name entry is invalid, which means the current name
            # entry cannot be valid.
            name_counts.append(None)
            continue
        if is_name_ok(rodata, token_lengths, offset):
            # The current name entry is valid. Check whether it is preceded by
            # kallsyms_num_syms value, which is consistent with the number of
            # name entries we've seen so far.
            name_counts.append(next_name_count + 1)
            num_syms1, = struct.unpack(num_syms_fmt, rodata[offset - 4:offset])
            if name_counts[-1] == num_syms1:
                num_syms_offset = offset - 4
                break
            num_syms2, = struct.unpack(
                num_syms_fmt, rodata[offset - 8:offset - 4])
            if name_counts[-1] == num_syms2:
                num_syms_offset = offset - 8
                break
        else:
            # The current name entry is not valid. This is allowed if we are
            # still looking at trailing zeroes.
            name_counts.append(0 if trailing_zeroes else None)
    else:
        return
    # We've found kallsyms_names, now parse it.
    names = []
    for _ in range(name_counts[-1]):
        names.append(extract_name(rodata, token_table, offset).decode())
        offset += rodata[offset] + 1
    yield num_syms_offset, names
def get_addresses(rodata, endianness, num_syms_offset, num_syms):
    """Decode symbol addresses from kallsyms_offsets + kallsyms_relative_base.

    Only one layout is understood: a non-percpu kallsyms_offsets array of
    `num_syms` signed 32-bit entries, 8-byte aligned, followed by an 8-byte
    kallsyms_relative_base that ends at `num_syms_offset`. Non-negative raw
    entries are absolute; negative ones are relative to the base.
    Returns (addresses_offset, addresses).
    """
    relative_base, = struct.unpack(
        endianness + 'Q', rodata[num_syms_offset - 8:num_syms_offset])
    addresses_offset = num_syms_offset - 8 - num_syms * 4
    if addresses_offset % 8 != 0:
        addresses_offset -= 4  # account for alignment padding
    raw_values = struct.unpack(
        endianness + str(num_syms) + 'i',
        rodata[addresses_offset:addresses_offset + 4 * num_syms])
    addresses = [
        raw if raw >= 0 else relative_base - 1 - raw
        for raw in raw_values
    ]
    return addresses_offset, addresses
def find_kallsyms_in_rodata(rodata, endianness):
    """Heuristically locate the kallsyms tables inside a .rodata blob.

    Tries every candidate combination of kallsyms_token_index /
    kallsyms_token_table / kallsyms_markers / kallsyms_names and, for the
    first fully consistent one, returns an iterable of
    (address, symbol_name) pairs; returns [] when nothing matches.
    `endianness` is a struct format prefix ('<' or '>').
    """
    for token_index_offset, token_index in find_token_indices(
            rodata, endianness):
        logging.debug(
            '0x%08X: kallsyms_token_index=%s',
            token_index_offset, token_index)
        for token_table_offset, token_table in find_token_tables(
                rodata, token_index, token_index_offset):
            logging.debug(
                '0x%08X: kallsyms_token_table=%s',
                token_table_offset, token_table)
            for markers_offset, markers in find_markers(
                    rodata, endianness, token_table_offset):
                logging.debug(
                    '0x%08X: kallsyms_markers=%s',
                    markers_offset, markers)
                for num_syms_offset, names in find_num_syms(
                        rodata, endianness, token_table, markers_offset):
                    logging.debug(
                        '0x%08X: kallsyms_num_syms=%s',
                        num_syms_offset, len(names))
                    addresses_offset, addresses = get_addresses(
                        rodata, endianness, num_syms_offset, len(names))
                    # kallsyms_token_index (256 uint16s) is the last table.
                    kallsyms_end = token_index_offset + (256 * 2)
                    kallsyms_size = kallsyms_end - addresses_offset
                    logging.debug(
                        '0x%08X: kallsyms[0x%08X]',
                        addresses_offset, kallsyms_size)
                    # First fully consistent decoding wins.
                    return zip(addresses, names)
    return []
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    if len(sys.argv) != 3:
        print('Usage: {} PATH ENDIANNESS'.format(sys.argv[0]))
        sys.exit(1)
    # ENDIANNESS is a struct format prefix: '<' (little) or '>' (big).
    rodata_path, endianness = sys.argv[1:]
    with open(rodata_path, 'rb') as fp:
        rodata = bytearray(fp.read())
    for address, name in find_kallsyms_in_rodata(rodata, endianness):
        print('{:016X} {}'.format(address, name))
| [
"[email protected]"
] | |
4322e050d3909e3df8ab3c4a1ef098fa511d9eb0 | ab4f74d127bfc89813ee359bb9c779eca5426ddc | /script/label_image.runfiles/org_tensorflow/tensorflow/contrib/signal/python/ops/mfcc_ops.py | 65b5335b3ae79e1ab35ee59ea3fb837590a4b44e | [
"MIT"
] | permissive | harshit-jain-git/ImageNET | cdfd5a340b62862ad8d1cc3b9a0f30cccc481744 | 1cd4c2b70917e4709ce75422c0205fe3735a1b01 | refs/heads/master | 2022-12-11T12:47:46.795376 | 2017-12-19T05:47:26 | 2017-12-19T05:47:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 94 | py | /home/co/Documents/ImageClassifier/tensorflow/tensorflow/contrib/signal/python/ops/mfcc_ops.py | [
"[email protected]"
] | |
ba3716f0dc54c992ee423cea11f9fbcde6fadde9 | 9cc3135d5fcd781c0542a905c61dc19b0ceeffef | /alien_colors_ver1-1.py | cad614ac833cdc33de423b5b07873c40dfe1f32c | [] | no_license | bkalcho/python-crash-course | 411d8af223fb6974d4f890c0f82c9e56b062359c | 8425649a2ecd5abeeb438e816400f270d937758e | refs/heads/master | 2022-09-11T13:47:56.837256 | 2022-08-23T10:04:35 | 2022-08-23T10:04:35 | 69,810,386 | 14 | 8 | null | 2022-08-23T10:04:36 | 2016-10-02T17:14:41 | Python | UTF-8 | Python | false | false | 185 | py | # Author: Bojan G. Kalicanin
# Date: 29-Sep-2016
# If alien color is not green nothing will be printed
alien_color = 'red'
if alien_color == 'green':
print('You earned 5 points.') | [
"[email protected]"
] | |
ccab0aa42ec4c0223f0a0dc999e0a97fcb427b0c | 56cf6ed165c4fe90782dc03c60f5a976d33064a8 | /batch/rootplots/finalplots.py | 2118fdf2fdc0e850d7fae41f9e33d60ee63c8444 | [] | no_license | aminnj/scouting | 36b5a08927e8fa6061cbd1b70ce23b674c56bcc1 | ed7bd442aaa1f53b21378d2a7fbf10ca7869ecc7 | refs/heads/master | 2021-06-24T05:09:20.248605 | 2021-04-11T05:11:27 | 2021-04-11T05:11:27 | 208,143,579 | 1 | 2 | null | 2019-11-25T22:49:29 | 2019-09-12T20:47:01 | Jupyter Notebook | UTF-8 | Python | false | false | 14,830 | py | import os
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import glob
from matplotlib.colors import LogNorm
from tqdm.auto import tqdm
import time
import re
import subprocess
import json
import requests
import uproot4
from yahist import Hist1D, Hist2D
def set_plotting_style():
    """Install the shared matplotlib rc defaults used by all plots here."""
    from matplotlib import rcParams

    rcParams.update({
        "font.family": "sans-serif",
        "font.sans-serif": ["Helvetica", "Arial", "Liberation Sans", "Bitstream Vera Sans", "DejaVu Sans"],
        "legend.fontsize": 11,
        "legend.labelspacing": 0.2,
        # https://stackoverflow.com/questions/29549530/how-to-change-the-linewidth-of-hatch-in-matplotlib
        "hatch.linewidth": 0.5,
        "axes.xmargin": 0.0,  # rootlike, no extra padding within x axis
        "axes.labelsize": "x-large",
        "axes.formatter.use_mathtext": True,
        "legend.framealpha": 0.65,
        "axes.titlesize": "large",
        "xtick.labelsize": "large",
        "ytick.labelsize": "large",
        "figure.subplot.hspace": 0.1,
        "figure.subplot.wspace": 0.1,
        "figure.subplot.right": 0.97,
        "figure.subplot.top": 0.92,
        "figure.max_open_warning": 0,
        "figure.dpi": 100,
        "axes.formatter.limits": [-5, 4],  # scientific notation if log(y) outside this
    })
def add_cms_info_1d(ax, typ="Preliminary", lumi="101", xtype=0.105):
    """Stamp "CMS", a qualifier (`typ`), and the lumi/energy label above `ax`.

    When `lumi` is None only the center-of-mass energy is shown on the right.
    `xtype` is the axes-fraction x position of the qualifier text.
    """
    shared = dict(verticalalignment='bottom', transform=ax.transAxes)
    ax.text(0.0, 1.01, "CMS", horizontalalignment='left', name="Arial",
            weight="bold", size=15, **shared)
    ax.text(xtype, 1.01, typ, horizontalalignment='left', name="Arial",
            style="italic", size=14, **shared)
    if lumi is None:
        right_label = "(13 TeV)"
    else:
        right_label = "%s fb${}^\mathregular{-1}$ (13 TeV)" % (lumi)
    ax.text(0.99, 1.01, right_label, horizontalalignment='right', size=13, **shared)
def add_cms_info_2d(ax, typ="Preliminary", lumi="101", xtype=0.15):
    """Stamp "CMS", a qualifier (`typ`), and the lumi label above a 2D plot.

    `xtype` is the axes-fraction x position of the qualifier text.
    """
    ax.text(0.0, 1.01, "CMS", horizontalalignment='left', verticalalignment='bottom',
            transform=ax.transAxes, name="Arial", weight="bold", size=14)
    # Bug fix: the qualifier used to be hard-coded to "Preliminary",
    # silently ignoring the `typ` argument (unlike add_cms_info_1d).
    ax.text(xtype, 1.01, typ, horizontalalignment='left', verticalalignment='bottom',
            transform=ax.transAxes, name="Arial", style="italic", size=13)
    ax.text(0.99, 1.01, "%s fb${}^\mathregular{-1}$ (13 TeV)" % (lumi),
            horizontalalignment='right', verticalalignment='bottom',
            transform=ax.transAxes, size=12)
def to_yahist(h, overflow=False):
    """Convert an uproot TH1/TH2 object into a yahist Hist1D/Hist2D.

    For 1D histograms, `overflow=True` folds the under/overflow bins into
    the first/last visible bins before stripping them off.
    """
    if "TH1" not in str(type(h)):
        # 2D: counts come back transposed relative to yahist's convention
        counts, xedges, yedges = h.to_numpy(flow=False)
        return Hist2D.from_bincounts(counts.T, (xedges, yedges))
    counts, edges = h.to_numpy(flow=overflow)
    if overflow:
        # fold flow bins into the first/last real bins, then drop them
        counts[1] += counts[0]
        counts[-2] += counts[-1]
        counts = counts[1:-1]
        edges = edges[1:-1]
    return Hist1D.from_bincounts(counts, edges)
# Apply the shared plotting style before any figures are created.
set_plotting_style()
# Earlier explicit-RGB palette, kept for reference:
# model_info = {
#         ("bphi",0.5,1): dict(label=r"B$\rightarrow\phi$ (0.5GeV,c$\tau$=1mm)", color=[0.98,0.85,0.29], fname="output_BToPhi_mphi0p5_ctau1mm.root"),
#         ("bphi",2,10): dict(label=r"B$\rightarrow\phi$ (2GeV,c$\tau$=10mm)", color=[0.94,0.58,0.21], fname="output_BToPhi_mphi2_ctau10mm.root"),
#         ("bphi",4,100): dict(label=r"B$\rightarrow\phi$ (4GeV,c$\tau$=100mm)", color=[0.92,0.28,0.15], fname="output_BToPhi_mphi4_ctau100mm.root"),
#         ("hzd",2,100): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (2GeV,c$\tau$=100mm)", color=[0.46,0.98,0.73], fname="output_HToZdZdTo2Mu2X_mzd2_ctau100mm.root"),
#         ("hzd",8,10): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (8GeV,c$\tau$=10mm)", color=[0.33,0.73,0.98], fname="output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root"),
#         ("hzd",15,1): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (15GeV,c$\tau$=1mm)", color=[0.53,0.10,0.96], fname="output_HToZdZdTo2Mu2X_mzd15_ctau1mm.root"),
#         }
# Signal samples to overlay: key is (model, mass in GeV, ctau in mm);
# value carries the legend label, matplotlib color-cycle color, and the
# histogram file name under mcoutputs/.
model_info = {
    ("bphi",0.5,1): dict(label=r"B$\rightarrow\phi$ (0.5GeV,c$\tau$=1mm)", color="C0", fname="output_BToPhi_mphi0p5_ctau1mm.root"),
    ("bphi",2,10): dict(label=r"B$\rightarrow\phi$ (2GeV,c$\tau$=10mm)", color="C1", fname="output_BToPhi_mphi2_ctau10mm.root"),
    ("bphi",4,100): dict(label=r"B$\rightarrow\phi$ (4GeV,c$\tau$=100mm)", color="C2", fname="output_BToPhi_mphi4_ctau100mm.root"),
    ("hzd",2,100): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (2GeV,c$\tau$=100mm)", color="C4", fname="output_HToZdZdTo2Mu2X_mzd2_ctau100mm.root"),
    ("hzd",8,10): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (8GeV,c$\tau$=10mm)", color="C3", fname="output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root"),
    ("hzd",15,1): dict(label=r"H$\rightarrow \mathrm{Z_d Z_d}$ (15GeV,c$\tau$=1mm)", color="C5", fname="output_HToZdZdTo2Mu2X_mzd15_ctau1mm.root"),
}
# All output figures are written below this directory.
os.system("mkdir -p plots_selection")
def plot_1():
    """Plot l_xy before/after the material veto for the HZd (8 GeV, 10 mm) MC sample."""
    with uproot4.open("mcoutputs/main/output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root") as f:
        fig, ax = plt.subplots()
        label = model_info[("hzd",8,10)]["label"]
        h1 = to_yahist(f["DV_rho_tot"], overflow=False).rebin(2)
        h1.plot(ax=ax, label=f"{label}, before veto", color="k", lw=2.0)
        h2 = to_yahist(f["DV_rho_matveto"], overflow=False).rebin(2)
        # veto efficiency in percent: fraction of vertices surviving the material veto
        eff = h2.integral/h1.integral * 100.
        h2.plot(ax=ax, label=f"{label}, after veto (eff. = {eff:.1f}%)", color="C3", lw=1.0)
        add_cms_info_1d(ax, lumi=None, typ="Simulation")
        ax.set_ylim(bottom=0.)
        ax.set_ylabel("Unweighted events", ha="right", y=1.)
        ax.set_xlabel(r"$l_\mathrm{xy}$ (cm)", ha="right", x=1., labelpad=-1.0)
        fname = f"plots_selection/signal_passL1_lxy_materialveto.pdf"
        print(fname)
        fig.savefig(fname)
        # "ic" is presumably a local image-viewer helper on PATH -- side effect only
        os.system(f"ic {fname}")
def plot_2():
    """Plot l_xy before/after the material veto for data (log-y scale)."""
    with uproot4.open("dataoutputs/main/output.root") as f:
        fig, ax = plt.subplots()
        label = r"Data"
        h1 = to_yahist(f["DV_rho_tot"], overflow=False)
        h1.plot(ax=ax, label=f"{label}, before veto", color="k", lw=2.0)
        h2 = to_yahist(f["DV_rho_matveto"], overflow=False)
        # NOTE(review): eff is computed but not shown in the legend here,
        # unlike plot_1 -- presumably intentional for data.
        eff = h2.integral/h1.integral * 100.
        h2.plot(ax=ax, label=f"{label}, after veto", color="C3", lw=1.0)
        add_cms_info_1d(ax)
        ax.set_yscale("log")
        ax.set_ylabel("Events", ha="right", y=1.)
        ax.set_xlabel(r"$l_\mathrm{xy}$ (cm)", ha="right", x=1., labelpad=-1.0)
        fname = f"plots_selection/data_passL1_lxy_materialveto.pdf"
        print(fname)
        fig.savefig(fname)
        os.system(f"ic {fname}")
def plot_3():
    """2D vertex maps (x-y and z-rho) for data, before and after the material veto."""
    with uproot4.open("dataoutputs/main/output.root") as f:
        for saxis in ["xy", "rhoz"]:
            for which in ["all", "pass"]:
                fig, ax = plt.subplots()
                hname = None
                if saxis == "xy":
                    if which == "all": hname = "DV_y_vs_x_tot"
                    if which == "pass": hname = "DV_y_vs_x_matveto"
                    xlabel = "DV x (cm)"
                    ylabel = "DV y (cm)"
                if saxis == "rhoz":
                    if which == "all": hname = "DV_rho_vs_z_tot"
                    if which == "pass": hname = "DV_rho_vs_z_matveto"
                    xlabel = "DV z (cm)"
                    ylabel = r"DV $\rho$ (cm)"
                h = to_yahist(f[hname])
                h.plot(ax=ax, logz=True, cmap="viridis")
                add_cms_info_2d(ax)
                ax.set_xlabel(xlabel)
                ax.set_ylabel(ylabel)
                # square pixels for x-y; stretch the z axis view for rho-z
                ax.set_aspect(1.0 if saxis == "xy" else 2.5)
                fname = f"plots_selection/passL1_DV_{saxis}_{which}.pdf"
                print(fname)
                fig.savefig(fname)
                os.system(f"ic {fname}")
def plot_4():
    """Average reconstructed nDV/nMuon per event as a function of run number."""
    with uproot4.open("dataoutputs/nm1/output.root") as f:
        for which in ["nDV", "nMuon"]:
            fig, ax = plt.subplots()
            hname = f"{which}_vs_run"
            xlabel = "run number"
            ylabel = f"average reco. {which}"
            h = to_yahist(f[hname])
            # keep only runs >= 300000, merge run bins in pairs, then profile
            # the 2D histogram along x to get the per-run average
            h = h.restrict(300000,None)
            h = h.rebin(2,1)
            h = h.profile("x")
            h.plot(ax=ax, show_errors=True, ms=2., color="k")
            add_cms_info_1d(ax)
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            if which == "nDV":
                ax.set_ylim(1.4,1.6)
            if which == "nMuon":
                ax.set_ylim(2.4,2.6)
            fname = f"plots_selection/passL1_{which}_vs_run.pdf"
            print(fname)
            fig.savefig(fname)
            os.system(f"ic {fname}")
def plot_5():
    """Compare N-1 vertex-quality variables between data and the HZd (8 GeV, 10 mm) MC."""
    f_data = uproot4.open("dataoutputs/nm1/output.root")
    f_mc = uproot4.open("mcoutputs/nm1/output_HToZdZdTo2Mu2X_mzd8_ctau10mm.root")
    for which, xlabel in [
        ("xError", "DV x Error (cm)"),
        ("yError", "DV y Error (cm)"),
        ("zError", "DV z Error (cm)"),
        ("chi2ndof", "DV chi2/ndof"),
        ("lxy", "$l_\mathrm{xy}$ (cm)"),
    ]:
        fig, ax = plt.subplots()
        hname = f"{which}_inc"
        ylabel = "Events"
        h1 = to_yahist(f_data[hname])
        h1.plot(ax=ax, color="k", label="Data")
        label = model_info[("hzd",8,10)]["label"]
        h2 = to_yahist(f_mc[hname])
        # normalize MC to the data yield for shape comparison
        h2 *= h1.integral/h2.integral
        h2.plot(ax=ax, color="C3", label=label)
        add_cms_info_1d(ax)
        ax.set_xlabel(xlabel, ha="right", x=1., labelpad=-1.0)
        ax.set_ylabel("Events", ha="right", y=1.)
        ax.set_yscale("log")
        fname = f"plots_selection/passL1_DV_{which}.pdf"
        print(fname)
        fig.savefig(fname)
        os.system(f"ic {fname}")
    f_data.close()
    f_mc.close()
def plot_6():
    """Selection-variable shape comparisons per l_xy slice.

    Loads every "*_lxy*" histogram from the data file and from each signal
    sample in `model_info`, then for each selection variable draws the
    normalized low-mass/high-mass data shapes with all signal overlays,
    with an optional red dashed line marking the cut value.
    """
    f_data = uproot4.open("dataoutputs/main/output.root")
    hists_data = dict()
    for k,v in f_data.items():
        if "_lxy" not in k: continue
        # strip the ";1" uproot cycle suffix from the key
        k = str(k).rsplit(";",1)[0]
        hists_data[k] = to_yahist(v)
    f_data.close()
    hists_mc = dict()
    for mk,model in model_info.items():
        print(mk, model)
        hists_mc[mk] = dict()
        fname = model["fname"]
        f_mc = uproot4.open(f"mcoutputs/main/{fname}")
        for k,v in f_mc.items():
            if "_lxy" not in k: continue
            k = str(k).rsplit(";",1)[0]
            h = to_yahist(v)
            # re-wrap to attach the sample's label and color for plotting
            h = Hist1D(h, label=model["label"], color=model["color"])
            hists_mc[mk][k] = h
        f_mc.close()
    # (histogram stem, x-axis label, log-y?, cut value to mark or None)
    for which, xlabel, log, line in [
        ("dimupt_full", r"dimuon $p_\mathrm{T}$", False, 25.),
        ("mu2pt_trig", r"trailing muon $p_\mathrm{T}$", False, None),
        ("mu2eta_trig", r"trailing muon $\eta$", False, None),
        ("mu2chi2ndof_trig", r"Trailing muon $\chi^2/\mathrm{ndof}$", False, 3.),
        ("mu2trkmeas_trig", r"Trailing muon tracker layers with meas.", False, 6.),
        ("absdphimudv_passid", r"|$\Delta\phi(\mu,\vec{\mathrm{DV}})$|", True, 0.02),
        ("absdphimumu_passid", r"|$\Delta\phi(\mu_1,\mu_2)$|", False, 2.8),
        ("mu2trackiso_passkin", r"Trailing muon relative track isolation", True, 0.1),
        ("mu2drjet_passkin", r"$\Delta R(\mu_2,\mathrm{jet})$", True, 0.3),
        ("mu2excesshits_baseline", r"Trailing muon n(valid-expected) pixel hits", False, 0.5),
        ("logabsetaphi_baseline", r"$\mathrm{log_{10}abs}(\Delta\eta_{\mu\mu}/\Delta\phi_{\mu\mu})$", False, 1.25),
        ("mindxy_extraiso", r"minimum $|d_\mathrm{xy}|$", True, None),
        ("mindxysig_extraiso", r"minimum $d_\mathrm{xy}$ significance", True, 2.),
        ("mindxyscaled_extraiso", r"minimum lifetime-scaled |$d_\mathrm{xy}$|", True, 0.1),
        ("mu2pt_incl", r"trailing muon $p_\mathrm{T}$", False, None),
        ("mu2eta_incl", r"trailing muon $\eta$", False, None),
        ("mu2chi2ndof_incl", r"Trailing muon $\chi^2/\mathrm{ndof}$", False, 3.),
        ("mu2trkmeas_incl", r"Trailing muon tracker layers with meas.", False, 6.),
        ("absdphimudv_incl", r"|$\Delta\phi(\mu\mu,\vec{\mathrm{DV}})$|", True, 0.02),
        ("absdphimumu_incl", r"|$\Delta\phi(\mu_1,\mu_2)$|", False, 2.8),
        ("mu2trackiso_incl", r"Trailing muon relative track isolation", True, 0.1),
        ("mu2drjet_incl", r"$\Delta R(\mu_2,\mathrm{jet})$", True, 0.3),
        ("mu2excesshits_incl", r"Trailing muon n(valid-expected) pixel hits", False, 0.5),
        ("logabsetaphi_incl", r"$\mathrm{log_{10}abs}(\Delta\eta_{\mu\mu}/\Delta\phi_{\mu\mu})$", False, 1.25),
        ("mindxy_incl", r"minimum $|d_\mathrm{xy}|$", True, None),
        ("mindxysig_incl", r"minimum $d_\mathrm{xy}$ significance", True, 2.),
        ("mindxyscaled_incl", r"minimum lifetime-scaled |$d_\mathrm{xy}$|", True, 0.1),
    ]:
        # one histogram stem per l_xy slice (mass suffix stripped)
        hnames = set([k.rsplit("_",1)[0] for k in hists_data.keys() if k.startswith(which)])
        for basehname in hnames:
            # e.g. "..._lxy0p0to1p0" -> "0p0to1p0" -> (0.0, 1.0)
            lxystr = basehname.split("_lxy",1)[1].split("_")[0]
            lxylow, lxyhigh = list(map(float, lxystr.replace("p",".").split("to")))
            fig, ax = plt.subplots()
            h = hists_data[f"{basehname}_lowmass"]
            N = h.integral
            h = h.normalize()
            label = "Data (mass < 5 GeV)"
            if which in ["dimupt_full"]:
                label += f" [N = {int(N):,}]"
            h.plot(ax=ax, show_errors=True, color="k", label=label, ms=3.5)
            h = hists_data[f"{basehname}_highmass"]
            N = h.integral
            h = h.normalize()
            label = "Data (mass > 5 GeV)"
            if which in ["dimupt_full"]:
                label += f" [N = {int(N):,}]"
            h.plot(ax=ax, show_errors=True, color="b", label=label, ms=3.5)
            for mk in hists_mc.keys():
                h = hists_mc[mk][f"{basehname}_allmass"]
                h = h.normalize()
                h.plot(ax=ax, histtype="step")
            if line is not None:
                ax.axvline(line,color="red",linestyle="--")
            add_cms_info_1d(ax)
            ax.set_xlabel(xlabel, ha="right", x=1., labelpad=-1.0)
            ax.set_ylabel("Fraction of events", ha="right", y=1.)
            ax.set_title(rf"{lxylow} cm < $l_\mathrm{{xy}}$ < {lxyhigh} cm", color=(0.2,0.2,0.2))
            if log:
                ax.set_yscale("log")
            fname = f"plots_selection/{basehname}.pdf"
            print(fname)
            fig.savefig(fname)
            # os.system(f"ic {fname}")
if __name__ == "__main__":
    # Only the selection-variable comparison plots are regenerated by default;
    # uncomment the other plot_*() calls to rebuild everything.
    # (Removed a dead `pass` statement that preceded the call.)
    # plot_1()
    # plot_2()
    # plot_3()
    # plot_4()
    # plot_5()
    plot_6()
| [
"[email protected]"
] | |
42a51fbfbf765fe3650c8ab9c41927a8259c62ff | 9a0ada115978e9600ad7f1eab65fcc8825f637cf | /work_in_progress/_old/stage_aligment_convert/remove_stage_point.py | 45542356653d90923ad1ca5276940178c3a9f832 | [] | no_license | ver228/work-in-progress | c1971f8d72b9685f688a10e4c5a1b150fa0812da | ef5baecc324da4550f81edb0513d38f039ee3429 | refs/heads/master | 2018-12-16T22:18:55.457290 | 2018-09-14T09:27:49 | 2018-09-14T09:27:49 | 56,165,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Aug 7 19:45:26 2017
@author: ajaver
"""
import tables
import numpy as np
import os
import pymysql
from tierpsy.analysis.contour_orient.correctVentralDorsal import switchCntSingleWorm
if __name__ == '__main__':
    # Walk every experiment registered in the local single-worm DB and re-run
    # the ventral/dorsal contour switch on its skeletons file.
    conn = pymysql.connect(host='localhost', database='single_worm_db')
    cur = conn.cursor(pymysql.cursors.DictCursor)
    sql = '''
    SELECT *
    FROM experiments_full
    '''
    cur.execute(sql)
    f_data = cur.fetchall()
    # all rows were fetched eagerly, so the DB resources can be released
    # before the (slow) per-file processing loop (fixes a connection leak)
    cur.close()
    conn.close()

    for irow, row in enumerate(f_data):
        fpath = os.path.join(row['results_dir'], row['base_name'])
        masked_file = fpath + '.hdf5'
        skeletons_file = fpath + '_skeletons.hdf5'
        if os.path.exists(skeletons_file):
            # progress indicator: 1-based index of total
            print(irow+1, len(f_data))
            switchCntSingleWorm(skeletons_file)
            # Previous clean-up pass, kept for reference:
#            with tables.File(skeletons_file, 'r+') as fid:
#                if '/stage_movement' in fid:
#                    exit_flag = fid.get_node('/stage_movement')._v_attrs['has_finished']
#                    if exit_flag > 0:
#                        frame_diffs = fid.get_node('/stage_movement/frame_diffs')[:]
#                        if exit_flag > 1 or np.any(frame_diffs<0):
#
#                            print(exit_flag, irow, row['base_name'])
#                            if '/stage_movement' in fid:
#                                fid.remove_node('/stage_movement', recursive=True)
#                            if '/provenance_tracking/STAGE_ALIGMENT' in fid:
#                                fid.remove_node('/provenance_tracking/STAGE_ALIGMENT', recursive=True)
#
#            for ext in ['_features.hdf5', '.wcon.zip']:
#                fname = fpath + ext
#                if os.path.exists(fname):
#                    os.remove(fname)
# os.remove(fname) | [
"[email protected]"
] | |
fc4489fe4def15e7a8ccd94df2f27d10fc6dad76 | 537259790008b727c03c56ec55a6aaaeeeaf65a3 | /scrapers/tvrelease_scraper.py | 533a9a8e18bb3485693ce0a1c03222774e2bd2a3 | [] | no_license | djbijo/salts | a5781ac9958b77c2acfacf4f73a5286e0b91d8e2 | 9eaa736701833eedf6796403da33d648aaf348f8 | refs/heads/master | 2020-12-11T03:26:15.843807 | 2015-04-09T18:35:45 | 2015-04-09T18:35:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,354 | py | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import scraper
import urllib
import urlparse
import re
import xbmcaddon
from salts_lib import log_utils
from salts_lib.constants import VIDEO_TYPES
from salts_lib.db_utils import DB_Connection
from salts_lib.constants import QUALITIES
BASE_URL = 'http://tv-release.net'
# site category string (upper-cased, e.g. 'TV-720P') -> SALTS quality bucket
QUALITY_MAP = {'MOVIES-XVID': QUALITIES.MEDIUM, 'TV-XVID': QUALITIES.HIGH, 'TV-MP4': QUALITIES.HIGH,
               'TV-480P': QUALITIES.HIGH, 'MOVIES-480P': QUALITIES.HIGH, 'TV-720P': QUALITIES.HD, 'MOVIES-720P': QUALITIES.HD}
class TVReleaseNet_Scraper(scraper.Scraper):
    """SALTS scraper for tv-release.net blog-style release listings (Python 2)."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        self.db_connection = DB_Connection()
        # the user-configured base url (addon setting) overrides the module default
        self.base_url = xbmcaddon.Addon().getSetting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        """Video types this scraper can serve."""
        return frozenset([VIDEO_TYPES.MOVIE, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'TVRelease.Net'

    def resolve_link(self, link):
        # links already point at the hoster; nothing to resolve
        return link

    def format_source_label(self, item):
        return '[%s] %s' % (item['quality'], item['host'])

    def get_sources(self, video):
        """Scrape the release page for hoster links, skipping .rar archives."""
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            # the page category (e.g. TV-720P) determines the quality bucket
            q_str = ''
            match = re.search('>Category.*?td_col">([^<]+)', html)
            if match:
                q_str = match.group(1).upper()
            pattern = "td_cols.*?href='([^']+)"
            for match in re.finditer(pattern, html):
                url = match.group(1)
                # raw string: the pattern contains regex escapes (\.), which
                # previously relied on an invalid escape sequence passing through
                if re.search(r'\.rar(\.|$)', url):
                    continue
                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'direct': False}
                hoster['host'] = urlparse.urlsplit(url).hostname
                hoster['quality'] = self._get_quality(video, hoster['host'], QUALITY_MAP.get(q_str, None))
                hosters.append(hoster)
        return hosters

    def get_url(self, video):
        return self._blog_get_url(video, delim=' ')

    @classmethod
    def get_settings(cls):
        """Base scraper settings plus the age filter and auto-select options."""
        settings = super(TVReleaseNet_Scraper, cls).get_settings()
        settings = cls._disable_sub_check(settings)
        name = cls.get_name()
        settings.append(' <setting id="%s-filter" type="slider" range="0,180" option="int" label=" Filter results older than (0=No Filter) (days)" default="30" visible="eq(-6,true)"/>' % (name))
        settings.append(' <setting id="%s-select" type="enum" label=" Automatically Select" values="Most Recent|Highest Quality" default="0" visible="eq(-7,true)"/>' % (name))
        return settings

    def search(self, video_type, title, year):
        """Query the site search, restricted to the matching category list."""
        search_url = urlparse.urljoin(self.base_url, '/?s=')
        search_url += urllib.quote(title)
        if video_type == VIDEO_TYPES.EPISODE:
            search_url += '&cat=TV-XviD,TV-Mp4,TV-720p,TV-480p,'
        else:
            search_url += '&cat=Movies-XviD,Movies-720p,Movies-480p'
        html = self._http_get(search_url, cache_limit=.25)
        pattern = "posts_table.*?<a[^>]+>(?P<quality>[^<]+).*?href='(?P<url>[^']+)'>(?P<post_title>[^<]+).*?(?P<date>[^>]+)</td></tr>"
        date_format = '%Y-%m-%d %H:%M:%S'
        return self._blog_proc_results(html, pattern, date_format, video_type, title, year)

    def _http_get(self, url, cache_limit=8):
        return super(TVReleaseNet_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cache_limit=cache_limit)
| [
"[email protected]"
] | |
5b9a0e8151fc4c44ee36a6bf9630696e3772d3bf | c9500ad778b8521aaa85cb7fe3239989efaa4799 | /plugins/proofpoint_tap/unit_test/test_get_blocked_clicks.py | 20515642f86f5bf197f87ee9b53be6017f8d31ab | [
"MIT"
] | permissive | rapid7/insightconnect-plugins | 5a6465e720f114d71b1a82fe14e42e94db104a0b | 718d15ca36c57231bb89df0aebc53d0210db400c | refs/heads/master | 2023-09-01T09:21:27.143980 | 2023-08-31T10:25:36 | 2023-08-31T10:25:36 | 190,435,635 | 61 | 60 | MIT | 2023-09-14T08:47:37 | 2019-06-05T17:05:12 | Python | UTF-8 | Python | false | false | 3,159 | py | import sys
import os
from unittest.mock import patch
from komand_proofpoint_tap.actions.get_blocked_clicks import GetBlockedClicks
from insightconnect_plugin_runtime.exceptions import PluginException
from komand_proofpoint_tap.util.exceptions import ApiException
from test_util import Util
from unittest import TestCase
from parameterized import parameterized
# NOTE(review): this append runs *after* `from test_util import Util` above --
# presumably the tests are executed from this directory so the import already
# resolves; verify whether the append is still needed.
sys.path.append(os.path.abspath("../"))
@patch("requests.request", side_effect=Util.mocked_requests_get)
class TestGetBlockedClicks(TestCase):
    """Unit tests for the GetBlockedClicks action; all HTTP traffic is mocked."""

    @classmethod
    def setUpClass(cls) -> None:
        # one shared action instance wired to the default (mock) connection
        cls.action = Util.default_connector(GetBlockedClicks())

    @parameterized.expand(
        [
            [
                "blocked_clicks",
                Util.read_file_to_dict("inputs/get_blocked_clicks.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks.json.exp"),
            ],
            [
                "blocked_clicks_cleared_status",
                Util.read_file_to_dict("inputs/get_blocked_clicks_cleared_status.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_cleared_status.json.exp"),
            ],
            [
                "blocked_clicks_without_url",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_url.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_url.json.exp"),
            ],
            [
                "blocked_clicks_without_time_start",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start.json.exp"),
            ],
            [
                "blocked_clicks_without_time_end",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_end.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_end.json.exp"),
            ],
            [
                "blocked_clicks_without_time_start_end",
                Util.read_file_to_dict("inputs/get_blocked_clicks_without_time_start_end.json.inp"),
                Util.read_file_to_dict("expected/get_blocked_clicks_without_time_start_end.json.exp"),
            ],
        ]
    )
    def test_get_blocked_clicks(self, mock_request, test_name, input_params, expected):
        """Happy-path cases: action output must match the recorded fixture exactly."""
        actual = self.action.run(input_params)
        self.assertDictEqual(actual, expected)

    @parameterized.expand(
        [
            [
                "blocked_clicks_timerange_invalid",
                Util.read_file_to_dict("inputs/get_blocked_clicks_timerange_invalid.json.inp"),
                PluginException.causes[PluginException.Preset.BAD_REQUEST],
                PluginException.assistances[PluginException.Preset.BAD_REQUEST],
            ],
        ]
    )
    def test_get_blocked_clicks_raise_exception(self, mock_request, test_name, input_params, cause, assistance):
        """Invalid time ranges must surface as an ApiException with BAD_REQUEST details."""
        with self.assertRaises(ApiException) as error:
            self.action.run(input_params)
        self.assertEqual(error.exception.cause, cause)
        self.assertEqual(error.exception.assistance, assistance)
| [
"[email protected]"
] | |
9751b47661d97074ea93280984aa3a93a3a7246f | 6b81296eff6aac2b81326a3f97a7240321d085d1 | /pycampaign06[for loop].py | 6fa1c9058afbcf87d31d21acee1273479a816d0b | [
"Unlicense"
] | permissive | EssamSami5155/PyCampaign20 | 0d267c586e6060824c147a54a1cbc8d01c672e87 | 7c8dba63de1a499742c748a1b85d00eeebbb38d6 | refs/heads/master | 2022-12-17T06:32:49.112717 | 2020-09-20T09:49:51 | 2020-09-20T09:49:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,738 | py | # Repeating events
# Using for loop
import turtle # turtle is a library that helps us to draw.
turtle.color("blue")
turtle.forward(100)
turtle.right(45)
turtle.color("green")
turtle.forward(50)
turtle.right(45)
turtle.color("grey")
turtle.forward(100)
# turtle commands
# right(x) - rotate right x degrees.
# left(x) - rotate left x degrees.
# colour("x") - change pen color to x.
# forward(x) - move forward x.
# backward(x) - move backward x.
# drawing square with turtle
import turtle
# A square drawn the repetitive way -- four forward/right pairs.
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
turtle.right(90)
turtle.forward(100)
# this is a bad way to complete this task.
# we can use loops to make this task easiar.
# Loops allows us to repeat the same line of code as often as we want.
# exp-
import turtle
for steps in range(4): # for loop is a special kind of loop which allows us to specifice how many time we need to execute this code.
    turtle.forward(65)
    turtle.left(90)
# in this code "steps" is a variable. we can name it anything.
# Nested loops
import turtle
for steps in range(4):
    turtle.forward(100)
    turtle.right(90)
    # the inner loop draws a smaller square at each corner of the outer one
    for moresteps in range(4):
        turtle.forward(50)
        turtle.right(90)
# variables inside loop
import turtle
shawki = 8
# 360/shawki degrees per turn closes a regular shawki-sided polygon
for steps in range(shawki):
    turtle.forward(100)
    turtle.right(360/shawki)
    for moresteps in range(shawki):
        turtle.forward(50)
        turtle.right(360/shawki)
# In python counting starts to 0. But we can specify numbers to count to or form.
for steps in range(1,10,2):
    print(steps)
# here counting starts to 1 from 10. but it skips 1 numbers after each step.
# we can also tell python exactly what values we want to use in the loop.
for steps in [1,2,3,4,5]:
    print(steps)
# even we dont have to use numbers.
import turtle
# loop directly over strings: use each color for one side of a square
for steps in ["red","blue","green","black"]:
    turtle.color(steps)
    turtle.forward(100)
    turtle.right(90)
    print(steps)
# Drawing a nested object
import turtle
print("Today we are going to draw an object using turtle librery in python.")
print("Tell us your opinion")
# user picks the number of sides of the regular polygon
user = int(input("How many sides the object will have?\n"))
for steps in range(user):
    turtle.forward(160)
    turtle.right(360/user)
    for moresteps in range(user):
        turtle.forward(50)
        turtle.right(360/user)
# displaying the Fibonacci series using a for loop
first = 0
second = 1
n = int(input("enter how many numbers you want in this series: "))
for i in range(n):
    print(first)
    # idiomatic tuple unpacking replaces the explicit temp-variable swap
    first, second = second, first + second
# display the sum of the series:1,3,5,7,9,11.......1119 using list
# first method- arithmetic-series formula: n * (first + last) / 2
first = 1
listf = []
while first <= 1119:
    listf.append(first)
    first = first + 2
num = len(listf)
v1 = listf[0]
v2 = listf[-1]
# Use // so the integer sum prints as an int (the other methods print ints),
# and do NOT name the result `sum` -- that shadowed the builtin `sum`,
# making the later `print(sum(range(1,1121,2)))` crash with a TypeError.
series_sum = (v1 + v2) * num // 2
print(series_sum)
# second method: build the list, then accumulate with a for loop
first = 1
total = 0
listf = []
while first <= 1119:
    listf.append(first)
    first = first + 2
for steps in listf:
    total = total + steps
print(total)
# third method
# list function converts to list
# range function is used to create a range of numbers.
# here range function indicates 1 to 1121, but not including 1121.
# and the third part indicates the gap between two number.
c = list(range(1,1121,2))
total = 0
for steps in c:
    total = total + steps
print(total)
# fourth method
# without using list
total = 0
for steps in range(1,1121,2):
    total = total + steps
    #or total+=steps
print(total)
#fifth method
# using while loop
total = 0
j = 1
while j < 1121:
    total += j
    j += 2
print(total)
# sixth method
# easiest method
# one line code
# relies on the *builtin* `sum`; make sure it has not been shadowed by a
# variable named `sum` earlier in the script
print(sum(range(1,1121,2)))
# sum of those values which are the multiple of 3 from a range.
total = 0
for steps in range(1,10000):
    if steps % 3 == 0:
        total += steps
print(total)
# sum of those values which are the multiple of 3 and 5 less than 100.
# (multiples of both 3 and 5, i.e. multiples of 15)
total = 0
for steps in range(1,100):
    if steps % 3 == 0 and steps % 5 == 0:
        total += steps
print(total)
# displaying a lists first value 1 time, second value 2 time, third value 3 time,....
a = ["banana","apple","mango"]
for i in range(len(a)):
    # inner loop repeats element i exactly i+1 times
    for j in range(i+1):
        print(a[i])
# break keyword.
nums = [1,2,3,4,5]
for n in nums:
    if n == 3:
        print("found!")
        break
    print(n)
# when the conditional is true, break keyword will breaks out the loop. It will ignore the value 3.
# continue keyword
# what if we want to ignore a value but not break out of the loop completely?
nums = [1,2,3,4,5]
for n in nums:
    if n == 3:
        print("found!")
        continue
    print(n)
# continue will skip to next value of the loop.
# keep the turtle window open until the user closes it
turtle.done()
"[email protected]"
] | |
8e342fda3a94a05c58c38e8e184d902cc8d9cd7a | c247a1979a843d03cda72229514f124a6d30f3b6 | /testproject_26580/settings.py | f3d0557da66a902ae2670ed1f3b61d54dbd0466f | [] | no_license | crowdbotics-apps/testproject-26580 | da91f9b5c5bde6ddebba9f11da0f141bbc4eb1b7 | ebc95b90a2b0621069ba4549fbc962456d9163ad | refs/heads/master | 2023-04-28T23:36:20.340815 | 2021-05-11T23:43:26 | 2021-05-11T23:43:26 | 366,539,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,784 | py | """
Django settings for testproject_26580 project.
Generated by 'django-admin startproject' using Django 2.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
import environ
import logging
# Environment-driven configuration (django-environ reads os.environ).
env = environ.Env()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.bool("DEBUG", default=False)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.str("SECRET_KEY")
ALLOWED_HOSTS = env.list("HOST", default=["*"])
SITE_ID = 1
# Trust the reverse proxy's forwarded-proto header when deciding HTTPS
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
SECURE_SSL_REDIRECT = env.bool("SECURE_REDIRECT", default=False)
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
]
# Apps that live in this repository
LOCAL_APPS = [
    'home',
    'modules',
    'users.apps.UsersConfig',
]
# Third-party dependencies (REST, auth, storage, docs)
THIRD_PARTY_APPS = [
    'rest_framework',
    'rest_framework.authtoken',
    'rest_auth',
    'rest_auth.registration',
    'bootstrap4',
    'allauth',
    'allauth.account',
    'allauth.socialaccount',
    'allauth.socialaccount.providers.google',
    'django_extensions',
    'drf_yasg',
    'storages',
]
INSTALLED_APPS += LOCAL_APPS + THIRD_PARTY_APPS
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'testproject_26580.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'testproject_26580.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# SQLite fallback for local development ...
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# ... overridden by DATABASE_URL (e.g. Postgres on Heroku) when set
if env.str("DATABASE_URL", default=None):
    DATABASES = {
        'default': env.db()
    }
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
# WhiteNoise serves static files directly from the app process
MIDDLEWARE += ['whitenoise.middleware.WhiteNoiseMiddleware']
AUTHENTICATION_BACKENDS = (
    'django.contrib.auth.backends.ModelBackend',
    'allauth.account.auth_backends.AuthenticationBackend'
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles")
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'static')]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# allauth / users: email-based login, no username field
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_USERNAME_REQUIRED = False
ACCOUNT_EMAIL_VERIFICATION = "optional"
ACCOUNT_CONFIRM_EMAIL_ON_GET = True
ACCOUNT_LOGIN_ON_EMAIL_CONFIRMATION = True
ACCOUNT_UNIQUE_EMAIL = True
LOGIN_REDIRECT_URL = "users:redirect"
ACCOUNT_ADAPTER = "users.adapters.AccountAdapter"
SOCIALACCOUNT_ADAPTER = "users.adapters.SocialAccountAdapter"
ACCOUNT_ALLOW_REGISTRATION = env.bool("ACCOUNT_ALLOW_REGISTRATION", True)
SOCIALACCOUNT_ALLOW_REGISTRATION = env.bool("SOCIALACCOUNT_ALLOW_REGISTRATION", True)
REST_AUTH_SERIALIZERS = {
    # Replace password reset serializer to fix 500 error
    "PASSWORD_RESET_SERIALIZER": "home.api.v1.serializers.PasswordSerializer",
}
REST_AUTH_REGISTER_SERIALIZERS = {
    # Use custom serializer that has no username and matches web signup
    "REGISTER_SERIALIZER": "home.api.v1.serializers.SignupSerializer",
}
# Custom user model
AUTH_USER_MODEL = "users.User"
# Outbound email via SendGrid SMTP
EMAIL_HOST = env.str("EMAIL_HOST", "smtp.sendgrid.net")
EMAIL_HOST_USER = env.str("SENDGRID_USERNAME", "")
EMAIL_HOST_PASSWORD = env.str("SENDGRID_PASSWORD", "")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# AWS S3 config
AWS_ACCESS_KEY_ID = env.str("AWS_ACCESS_KEY_ID", "")
AWS_SECRET_ACCESS_KEY = env.str("AWS_SECRET_ACCESS_KEY", "")
AWS_STORAGE_BUCKET_NAME = env.str("AWS_STORAGE_BUCKET_NAME", "")
AWS_STORAGE_REGION = env.str("AWS_STORAGE_REGION", "")
# S3 media storage is enabled only when all four credentials are provided
USE_S3 = (
    AWS_ACCESS_KEY_ID and
    AWS_SECRET_ACCESS_KEY and
    AWS_STORAGE_BUCKET_NAME and
    AWS_STORAGE_REGION
)
if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str(
        "DEFAULT_FILE_STORAGE", "home.storage_backends.MediaStorage"
    )
# Local media fallback (used when S3 is not configured)
MEDIA_URL = '/mediafiles/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'mediafiles')
# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}
if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning("You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails.")
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
"[email protected]"
] | |
41dc105eb07adb417e1c322ec9271ad8365af2c2 | 9a06c8ab42e0fbce88f06a1bd7237c4d5cae592a | /code/python_lesson/runoob/09摄氏度转华氏度.py | 783060706ad78ee49e750443b5c09375203fe90d | [
"MIT"
] | permissive | lxl0928/learning_python | 19040ca3ae92e5c07a1e813c707d625aa0ba8cb2 | ff0c6457186e7aa5b6ed9cafaea1dba616ce493a | refs/heads/master | 2023-05-13T13:59:28.815564 | 2021-03-22T02:08:53 | 2021-03-22T02:08:53 | 162,232,496 | 4 | 1 | MIT | 2023-05-01T20:15:55 | 2018-12-18T04:59:15 | Python | UTF-8 | Python | false | false | 342 | py | #! /usr/bin/python3
# -*- coding: utf-8 -*-
# Date: 2016.08.02
# Filename: 09.py
# Author: Timilong
# Read the Celsius temperature from the user (prompt text is in Chinese)
celsius = float(input("请输入摄氏温度: "))
# Convert Celsius to Fahrenheit: F = C * 9/5 + 32
fahrenheit = (celsius * 1.8) + 32
# Print both temperatures, formatted to one decimal place
print("%0.1f摄氏温度转化为华氏温度为%0.1f" % (celsius, fahrenheit))
| [
"[email protected]"
] | |
1337f2878c504d9d15a39baca3d7e460d62f6bc4 | c422cfdcd0303395b62a383611dca19236ea0e15 | /core/migrations/0009_diaperchange_amount.py | 991260de5bff332950e762549154f4f031abc2fc | [
"BSD-2-Clause-Views",
"BSD-2-Clause"
] | permissive | Alan01252/babybuddy | c18d26769458fbfd60d7e5493c1fab911d624ddd | 5382527dc84530fe56a65c7452620bba41bfd668 | refs/heads/master | 2022-12-18T17:09:21.064011 | 2020-09-16T11:33:07 | 2020-09-16T11:33:07 | 291,678,434 | 1 | 0 | BSD-2-Clause | 2020-08-31T09:57:07 | 2020-08-31T09:57:06 | null | UTF-8 | Python | false | false | 419 | py | # Generated by Django 3.0.2 on 2020-01-26 21:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0008_auto_20190607_1422'),
]
operations = [
migrations.AddField(
model_name='diaperchange',
name='amount',
field=models.FloatField(blank=True, null=True, verbose_name='Amount'),
),
]
| [
"[email protected]"
] | |
d289d25acaf78e7bb51c689c1de4b4495a3bbd9a | 244ecfc2017a48c70b74556be8c188e7a4815848 | /res/scripts/client/gui/scaleform/daapi/view/lobby/fortifications/fortdisabledefenceperiodwindow.py | d81f15c0ee94a51d408d7c2853b5cbd29a9df04e | [] | no_license | webiumsk/WOT-0.9.12 | c1e1259411ba1e6c7b02cd6408b731419d3174e5 | 5be5fd9186f335e7bae88c9761c378ff5fbf5351 | refs/heads/master | 2021-01-10T01:38:36.523788 | 2015-11-18T11:33:37 | 2015-11-18T11:33:37 | 46,414,438 | 1 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,109 | py | # 2015.11.18 11:54:00 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/fortifications/FortDisableDefencePeriodWindow.py
import BigWorld
from adisp import process
from gui import SystemMessages
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortSoundController import g_fortSoundController
from gui.Scaleform.daapi.view.lobby.fortifications.fort_utils.FortViewHelper import FortViewHelper
from gui.Scaleform.daapi.view.meta.FortDisableDefencePeriodWindowMeta import FortDisableDefencePeriodWindowMeta
from gui.Scaleform.locale.FORTIFICATIONS import FORTIFICATIONS as ALIAS, FORTIFICATIONS
from gui.Scaleform.locale.SYSTEM_MESSAGES import SYSTEM_MESSAGES
from gui.shared.formatters import text_styles
from gui.shared.fortifications.context import DefencePeriodCtx
from helpers import i18n
class FortDisableDefencePeriodWindow(FortDisableDefencePeriodWindowMeta, FortViewHelper):
def __init__(self, _ = None):
super(FortDisableDefencePeriodWindow, self).__init__()
self.__inputChecker = None
self.__controlNumber = self.fortCtrl.getFort().getTotalDefRes()
return
def initInputChecker(self):
self.__inputChecker.errorMsg = self.__makeInputCheckerError()
self.__inputChecker.questionTitle = self.__makeInputCheckerTitle()
self.__inputChecker.questionBody = self.__makeInputCheckerBody()
self.__inputChecker.setControlNumbers(self.__controlNumber, BigWorld.wg_getIntegralFormat)
def onWindowClose(self):
self.destroy()
def onClickApplyButton(self):
self.__setup()
def onDefenceHourShutdown(self):
if self.fortCtrl.getFort().isDefenceHourShutDown():
self.destroy()
def onShutdownDowngrade(self):
self.destroy()
def _onRegisterFlashComponent(self, viewPy, alias):
self.__inputChecker = viewPy
self.initInputChecker()
def _populate(self):
super(FortDisableDefencePeriodWindow, self)._populate()
self.startFortListening()
if self.fortCtrl.getFort().isDefenceHourShutDown():
return self.destroy()
self.__makeMainData()
def _dispose(self):
self.__inputChecker = None
self.stopFortListening()
super(FortDisableDefencePeriodWindow, self)._dispose()
return
def __makeInputCheckerError(self):
return text_styles.error(i18n.makeString(ALIAS.DEMOUNTBUILDING_ERRORMESSAGE))
def __makeInputCheckerTitle(self):
return text_styles.middleTitle(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_TITLE))
def __makeInputCheckerBody(self):
controlNumber = BigWorld.wg_getIntegralFormat(self.__controlNumber)
controlNumber = text_styles.middleTitle(str(controlNumber))
questionBody = text_styles.standard(i18n.makeString(ALIAS.DISABLEDEFENCEPERIODWINDOW_INPUTCHECKER_BODY, controlNumber=controlNumber))
return questionBody
def __makeMainData(self):
titleText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_TITLE))
redText = text_styles.error(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODYREDTEXT))
bodyText = text_styles.main(i18n.makeString(FORTIFICATIONS.DISABLEDEFENCEPERIODWINDOW_MAINTEXT_BODY, redText=redText))
self.as_setDataS({'titleText': titleText,
'bodyText': bodyText})
@process
def __setup(self):
result = yield self.fortProvider.sendRequest(DefencePeriodCtx(waitingID='fort/settings'))
if result:
g_fortSoundController.playDefencePeriodDeactivated()
SystemMessages.g_instance.pushI18nMessage(SYSTEM_MESSAGES.FORTIFICATION_DEFENCEHOURDEACTIVATED, type=SystemMessages.SM_TYPE.Warning)
self.destroy()
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\scaleform\daapi\view\lobby\fortifications\fortdisabledefenceperiodwindow.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2015.11.18 11:54:00 Střední Evropa (běžný čas)
| [
"[email protected]"
] | |
1d6d89dd402a1014ea003cc594770dd2a2538c49 | 6203b9132af8f78c6cb12242bd223fa17d14f31e | /leetcode/problems/556.py | 82b5c6ed99fea3e9b4e3d051b24c25cb28c78248 | [] | no_license | joshuap233/algorithms | 82c608d7493b0d21989b287a2e246ef739e60443 | dc68b883362f3ddcfb433d3d83d1bbf925bbcf02 | refs/heads/master | 2023-08-23T12:44:42.675137 | 2021-09-28T02:37:01 | 2021-09-28T02:37:01 | 230,285,450 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 686 | py | # https://leetcode-cn.com/problems/next-greater-element-iii/
# 556. 下一个更大元素 III
class Solution:
"""与下一个字典序的写法一样"""
MAXI = 2 ** 31 - 1
def nextGreaterElement(self, n: int) -> int:
if n <= 9:
return -1
s = list(str(n))
for i in range(len(s) - 2, -1, -1):
if s[i] < s[i + 1]:
break
else:
return -1
for j in range(len(s) - 1, i, -1):
if s[j] > s[i]:
break
s[i], s[j] = s[j], s[i]
s[i + 1:] = s[len(s) - 1:i:-1] # 逆序
ret = int(''.join(s))
return ret if ret <= self.MAXI else -1
| [
"[email protected]"
] | |
99ccf909e1b7071804da551122f2a3d7c85bb020 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/192/usersdata/273/70732/submittedfiles/al6.py | 62617a79d4eba687c0a500c294d12922ab0a48f2 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 185 | py | # -*- coding: utf-8 -*-
i= 2
c= 0
n= int(input('digite o valor de n: '))
while(i<n):
if (n%i)==0:
c=c+1
print(i)
i=i+1
if i==0
print(' primo')
if i>0
print('NAO PRIMO')) | [
"[email protected]"
] | |
6ec95f89ce993de65e468f212786248298f66665 | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/d79b6c84068e6f5fe995a74e39cd3f63d86bb294-<parse_lldp_intf>-bug.py | ddbc6fc1aa3322f069ebbc9cb05db83582c1618e | [] | no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | def parse_lldp_intf(self, data):
match = re.search('Interface:\\s*(\\S+)', data, re.M)
if match:
return match.group(1) | [
"[email protected]"
] | |
7a63e1a4a6717671c7176bf91eda13961f655536 | 99094cc79bdbb69bb24516e473f17b385847cb3a | /72.Edit Distance/Solution.py | 11b4f12f5ee723dcde3137a39b89d6242e6e0462 | [] | no_license | simonxu14/LeetCode_Simon | 7d389bbfafd3906876a3f796195bb14db3a1aeb3 | 13f4595374f30b482c4da76e466037516ca3a420 | refs/heads/master | 2020-04-06T03:33:25.846686 | 2016-09-10T00:23:11 | 2016-09-10T00:23:11 | 40,810,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 696 | py | __author__ = 'Simon'
class Solution(object):
def minDistance(self, word1, word2):
"""
:type word1: str
:type word2: str
:rtype: int
"""
dp = [[0 for j in range(len(word2)+1)] for i in range(len(word1)+1)]
for j in range(len(word2)+1):
dp[0][j] = j
for i in range(len(word1)+1):
dp[i][0] = i
for i in range(1,len(word1)+1):
for j in range(1,len(word2)+1):
if word1[i-1] == word2[j-1]:
dp[i][j] = dp[i-1][j-1]
else:
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1]) + 1
return dp[len(word1)][len(word2)] | [
"[email protected]"
] | |
b9c5ac147c500ee983edcc9fe10950a1a98fd9ce | 3d7039903da398ae128e43c7d8c9662fda77fbdf | /database/JavaScript/juejin_2514.py | eb9dae4e3fea3f75cbab068f9fcccaaa3a6b1488 | [] | no_license | ChenYongChang1/spider_study | a9aa22e6ed986193bf546bb567712876c7be5e15 | fe5fbc1a5562ff19c70351303997d3df3af690db | refs/heads/master | 2023-08-05T10:43:11.019178 | 2021-09-18T01:30:22 | 2021-09-18T01:30:22 | 406,727,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 64,994 | py | {"err_no": 0, "err_msg": "success", "data": [{"article_id": "6996311879895547912", "article_info": {"article_id": "6996311879895547912", "user_id": "1565333361004279", "category_id": "6809637767543259144", "tag_ids": [6809640407484334093, 6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "记录寄生继承和ES6 Class继承", "brief_content": "寄生组合继承 这是一种接近完美(接下来会展开)的继承方式,先来看其它一些继承方式有助于记忆; 原型链继承 这就是原型链继承, 优点:继承了父类的模板和原型 缺点: 子类修改了父类的引用类型的变量,会影", "is_english": 0, "is_original": 1, "user_index": 2.038840227317261, "original_type": 0, "original_author": "", "content": "", "ctime": "1628955897", "mtime": "1628997899", "rtime": "1628997899", "draft_id": "6994787042970304519", "view_count": 59, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00093716, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1565333361004279", "user_name": "401", "company": "摇浆部", "job_title": "前端开发", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/mosaic-legacy/3793/3131589739~300x300.image", "level": 1, "description": "", "followee_count": 36, "follower_count": 3, "post_article_count": 12, "digg_article_count": 175, "got_digg_count": 26, "got_view_count": 808, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 34, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6996311879895547912, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904165060116487", "article_info": {"article_id": "6844904165060116487", "user_id": "1996368846268334", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904165060116487", "cover_image": "", "is_gfw": 0, "title": 
"【JS】节点截图的最终解决方案dom-to-image与html2canvas", "brief_content": "...", "is_english": 0, "is_original": 1, "user_index": 0.33339625134645, "original_type": 0, "original_author": "", "content": "", "ctime": "1590050167", "mtime": "1598574980", "rtime": "1590050653", "draft_id": "6845076786070421511", "view_count": 3295, "collect_count": 13, "digg_count": 13, "comment_count": 2, "hot_index": 179, "is_hot": 0, "rank_index": 0.00093654, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1996368846268334", "user_name": "清一色天空", "company": "摸鱼划水公司", "job_title": "前端开发", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/9/24/16d62c7958563afa~tplv-t2oaga2asx-image.image", "level": 3, "description": "架构神聊者以及炫酷canvas、css3的好奇宝宝", "followee_count": 19, "follower_count": 283, "post_article_count": 15, "digg_article_count": 41, "got_digg_count": 2224, "got_view_count": 146734, "post_shortmsg_count": 25, "digg_shortmsg_count": 17, "isfollowed": false, "favorable_author": 0, "power": 3091, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": 
"https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904165060116487, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904194189557773", "article_info": {"article_id": "6844904194189557773", "user_id": "3966693685068510", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "纯JS打造级联选择器控件,仿ElementUi(附源码)", "brief_content": "公司之前有过Vue开发的项目,用到了ElementUi的级联选择器控件。不得了了,产品爸爸们开始作妖了,哎呦不错哦,我要用它到我这个项目里(项目以Js + Php为架构,前后端不分离)。 “这个需求很简单,怎么实现我不管。” 既然battle不过,那没办法,只能写一个纯Js插件…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1592391713", "mtime": "1605850413", "rtime": "1592449129", "draft_id": "6845076826671300616", "view_count": 2553, "collect_count": 24, "digg_count": 32, "comment_count": 7, "hot_index": 166, "is_hot": 0, "rank_index": 0.00093618, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3966693685068510", "user_name": "大王叫我来爬山", "company": "北京亿欧网盟科技有限公司", "job_title": "前端开发工程师", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/7/12/16be4e3755776605~tplv-t2oaga2asx-image.image", "level": 2, "description": "擅长前端各项技能及框架", "followee_count": 17, "follower_count": 40, "post_article_count": 5, "digg_article_count": 128, "got_digg_count": 144, "got_view_count": 11259, "post_shortmsg_count": 0, "digg_shortmsg_count": 1, "isfollowed": false, "favorable_author": 0, "power": 256, 
"study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904194189557773, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6996480955292598302", "article_info": {"article_id": "6996480955292598302", "user_id": "2348212567683421", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "进阶教程 6. 
正则应用", "brief_content": "上文介绍了正则的入门,本文将接着说正则虽然很傲娇,但是她也是风情万种,她能帮你解决很多复杂问题,不来看看么", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628995457", "mtime": "1629011180", "rtime": "1629011180", "draft_id": "6996475762391908360", "view_count": 47, "collect_count": 0, "digg_count": 3, "comment_count": 0, "hot_index": 5, "is_hot": 0, "rank_index": 0.00093599, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2348212567683421", "user_name": "和雍", "company": "滴滴出行", "job_title": "前端开发", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/91f7199852a14f4febd3e2ced75e03c5~300x300.image", "level": 2, "description": "我的野摩托经常喝多了", "followee_count": 19, "follower_count": 9, "post_article_count": 32, "digg_article_count": 78, "got_digg_count": 181, "got_view_count": 2740, "post_shortmsg_count": 1, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 208, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 
0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6996480955292598302, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "7000191211721981960", "article_info": {"article_id": "7000191211721981960", "user_id": "317104195124526", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/3a8e4e9c6e094b979800ee5957d7d4f7~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "深入理解 JavaScript 原型", "brief_content": "JavaScript 中有个特殊的存在:对象。每个对象还都拥有一个原型对象,并可以从中继承方法和属性。本文已参与掘金创作者训练营第三期「话题写作」赛道", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1629859072", "mtime": "1630315146", "rtime": "1629872649", "draft_id": "7000185291805818893", "view_count": 42, "collect_count": 0, "digg_count": 1, "comment_count": 0, "hot_index": 3, "is_hot": 0, "rank_index": 0.00093556, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "317104195124526", "user_name": "chengdwu", "company": "", "job_title": "web前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/c59b4e44c8ca03af03acceffc1a263a2~300x300.image", "level": 1, "description": "", "followee_count": 3, "follower_count": 1, 
"post_article_count": 6, "digg_article_count": 6, "got_digg_count": 15, "got_view_count": 1557, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 30, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 7000191211721981960, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904160198918158", "article_info": {"article_id": "6844904160198918158", "user_id": "747323640250926", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904160198918158", "cover_image": "", "is_gfw": 0, "title": "你可能不太了解的JSON", "brief_content": "这个知识点面试中被问的概率虽然很低,但是也有可能会问。 注意:JSON的key一定要用双引号,以及value如果是字符串也一定要用双引号。 
原生的JSON对象得到以下浏览器支持。", "is_english": 0, "is_original": 1, "user_index": 5.8380451654058, "original_type": 0, "original_author": "", "content": "", "ctime": "1589561405", "mtime": "1598961882", "rtime": "1589721783", "draft_id": "6845076777862184968", "view_count": 3002, "collect_count": 8, "digg_count": 21, "comment_count": 4, "hot_index": 175, "is_hot": 0, "rank_index": 0.00093548, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "747323640250926", "user_name": "iskeepingon", "company": "", "job_title": "", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/fc685c76669816629c70836efbce4242~300x300.image", "level": 2, "description": "", "followee_count": 0, "follower_count": 39, "post_article_count": 30, "digg_article_count": 35, "got_digg_count": 154, "got_view_count": 30555, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 459, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, 
"id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904160198918158, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903924172865543", "article_info": {"article_id": "6844903924172865543", "user_id": "430664257386558", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903924172865543", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/8/25/16cc9264d8fa31b1~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "Typescript版图解Functor , Applicative 和 Monad", "brief_content": "本文是经典的Functors, Applicatives, And Monads In Pictures的Typescript翻译版本。 Functor/Applicative/Monad是函数式编程中的一些比较‘基础’的概念,反正我是不认同‘基础’这个说法的,笔者也阅读过很多…", "is_english": 0, "is_original": 1, "user_index": 11.014284655693, "original_type": 0, "original_author": "", "content": "", "ctime": "1566742852", "mtime": "1600063966", "rtime": "1566783690", "draft_id": "6845076429311311886", "view_count": 4463, "collect_count": 51, "digg_count": 77, "comment_count": 8, "hot_index": 308, "is_hot": 0, "rank_index": 0.00093545, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "430664257386558", "user_name": "荒山", "company": "惟客", "job_title": "前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/12/20/16f22fbb0b9c7925~tplv-t2oaga2asx-image.image", "level": 6, "description": "草根前端", "followee_count": 74, "follower_count": 11368, "post_article_count": 47, "digg_article_count": 629, "got_digg_count": 16455, "got_view_count": 731610, "post_shortmsg_count": 35, "digg_shortmsg_count": 55, "isfollowed": false, "favorable_author": 1, "power": 23771, "study_point": 0, "university": 
{"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903924172865543, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903700176044040", "article_info": {"article_id": "6844903700176044040", "user_id": "219558055512414", "category_id": "6809637767543259144", "tag_ids": [6809640369764958215, 6809640398105870343, 6809640528267706382, 6809641048927633415], "visible_level": 0, "link_url": "https://juejin.im/post/6844903700176044040", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/10/29/166bd55ef487de3c~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "Vue SPA 项目webpack打包优化指南", "brief_content": "最近一个小伙伴问我他们公司的Vue后台项目怎么首次加载要十多秒太慢了,有什么能优化的,于是乎我打开了他们的网站,发现主要耗时在加载vendor.js文件这个文件高达2M,于是乎我就拿来他们的代码看看,进行了一番折腾。最终还是取得了不错的效果。 
对于网页性能,如何提升加载速度、等…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1540538178", "mtime": "1599636214", "rtime": "1540547063", "draft_id": "6845075639045718023", "view_count": 7445, "collect_count": 165, "digg_count": 122, "comment_count": 2, "hot_index": 496, "is_hot": 0, "rank_index": 0.00093544, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "219558055512414", "user_name": "AKing", "company": "前端打工人", "job_title": "前端打工人", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/mirror-assets/168e086a15081d2a8d5~tplv-t2oaga2asx-image.image", "level": 3, "description": "代码搬运工", "followee_count": 18, "follower_count": 686, "post_article_count": 22, "digg_article_count": 81, "got_digg_count": 1021, "got_view_count": 80820, "post_shortmsg_count": 0, "digg_shortmsg_count": 2, "isfollowed": false, "favorable_author": 0, "power": 1816, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546498, "tag_id": "6809640369764958215", "tag_name": "Vue.js", "color": "#41B883", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/7b5c3eb591b671749fee.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234520, "mtime": 
1631693194, "id_type": 9, "tag_alias": "", "post_article_count": 31257, "concern_user_count": 313520}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546614, "tag_id": "6809640528267706382", "tag_name": "Webpack", "color": "#6F94DB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/73e856b07f83b4231c1e.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1440920866, "mtime": 1631692726, "id_type": 9, "tag_alias": "", "post_article_count": 6704, "concern_user_count": 204077}, {"id": 2546989, "tag_id": "6809641048927633415", "tag_name": "CDN", "color": "#000000", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f20d8ce529685521d23c.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1489954158, "mtime": 1631638663, "id_type": 9, "tag_alias": "", "post_article_count": 651, "concern_user_count": 11068}], "user_interact": {"id": 6844903700176044040, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904013247283214", "article_info": {"article_id": "6844904013247283214", "user_id": "4054654612943303", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904013247283214", "cover_image": "", "is_gfw": 0, "title": "前端UI设计稿对比工具 - chrome+webpack插件", "brief_content": "(切图仔)前端开发者一大重要的职责就是将UI画稿转化为实际可用的页面,效果图的还原度在相当大的程度上决定了UI和PM的满意度一般情况下,拿到设计稿后,懒散点的可能直接看图软件打开,肉眼测距就开搞了,负", "is_english": 0, 
"is_original": 1, "user_index": 10.697075171448, "original_type": 0, "original_author": "", "content": "", "ctime": "1575431485", "mtime": "1600270546", "rtime": "1575436078", "draft_id": "6845076564409843720", "view_count": 4125, "collect_count": 49, "digg_count": 42, "comment_count": 6, "hot_index": 254, "is_hot": 0, "rank_index": 0.00093541, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4054654612943303", "user_name": "清夜", "company": "字节跳动", "job_title": "前端挖坑学院首席JS打字员", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/3edac05f29d8ed4e7ffdf499fda846bb~300x300.image", "level": 4, "description": "HTML堆砌者、 CSS 表演艺术家、高级IF-ELSE开发工程师、API调用专家、后端接口测试专员、文档制造者、bug路由器", "followee_count": 23, "follower_count": 951, "post_article_count": 47, "digg_article_count": 72, "got_digg_count": 3399, "got_view_count": 216915, "post_shortmsg_count": 9, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 1, "power": 5572, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 
1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904013247283214, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6992959237248778254", "article_info": {"article_id": "6992959237248778254", "user_id": "1028798616438151", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p6-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/7a1086006730433f8efa797abcfea2d3~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "JavaScript从根源了解深浅拷贝问题", "brief_content": "从JavaScript的变量开始介绍,逐步了解为什么会出现深浅拷贝问题,引用值和原始值的区别,以及如何解决。", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628175238", "mtime": "1628241274", "rtime": "1628225566", "draft_id": "6992957978638827551", "view_count": 64, "collect_count": 1, "digg_count": 4, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00093534, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1028798616438151", "user_name": "我家没有洗发水", "company": "", "job_title": "web前端", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/3/29/17126d9ca15770fc~tplv-t2oaga2asx-image.image", "level": 1, "description": "正在学习前端,在这里记录一些笔记ψ(._. 
)>", "followee_count": 6, "follower_count": 2, "post_article_count": 7, "digg_article_count": 16, "got_digg_count": 22, "got_view_count": 332, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 25, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6992959237248778254, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6992938207230820389", "article_info": {"article_id": "6992938207230820389", "user_id": "474636479897303", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343, 6809640407484334093], "visible_level": 0, "link_url": "", "cover_image": "", "is_gfw": 0, "title": "Event Loop事件循环", "brief_content": "这是我参与8月更文挑战的第5天,活动详情查看:8月更文挑战 1. 
JavaScript为什么是单线程的? JavaScript的单线程,与它的用途有关。作为浏览器脚本语言,JavaScript的主要用", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1628170380", "mtime": "1628225154", "rtime": "1628225154", "draft_id": "6992937858017263623", "view_count": 75, "collect_count": 1, "digg_count": 4, "comment_count": 0, "hot_index": 7, "is_hot": 0, "rank_index": 0.00093519, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "474636479897303", "user_name": "敲代码有瘾", "company": "", "job_title": "前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/e1d7446b60cec3cceb23431f49737188~300x300.image", "level": 1, "description": "敲代码有瘾", "followee_count": 9, "follower_count": 6, "post_article_count": 7, "digg_article_count": 35, "got_digg_count": 30, "got_view_count": 665, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 36, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, 
"mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}], "user_interact": {"id": 6992938207230820389, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844904104582447118", "article_info": {"article_id": "6844904104582447118", "user_id": "4265760849141943", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844904104582447118", "cover_image": "", "is_gfw": 0, "title": "JavaScript常见笔试题(持续更新)", "brief_content": "1. Promise模拟实现 2. Promise.all实现 4. call实现 5. apply实现 6. bind实现 7. 
继承方法", "is_english": 0, "is_original": 1, "user_index": 7.0590559426532, "original_type": 0, "original_author": "", "content": "", "ctime": "1585294947", "mtime": "1598558636", "rtime": "1585295364", "draft_id": "6845076703862063111", "view_count": 3065, "collect_count": 105, "digg_count": 43, "comment_count": 3, "hot_index": 199, "is_hot": 0, "rank_index": 0.00093475, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4265760849141943", "user_name": "ren", "company": "", "job_title": "", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2020/2/19/1705dbbcee2b3c54~tplv-t2oaga2asx-image.image", "level": 2, "description": "", "followee_count": 5, "follower_count": 28, "post_article_count": 6, "digg_article_count": 1, "got_digg_count": 105, "got_view_count": 7560, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 180, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, 
"id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844904104582447118, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6879606179924803591", "article_info": {"article_id": "6879606179924803591", "user_id": "1873223546578589", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/489b4fa661ef47c0aa11853d29883ee3~tplv-k3u1fbpfcp-zoom-1.image", "is_gfw": 0, "title": "JavaScript 类型 — 重学 JavaScript", "brief_content": "JavaScript 中最小的结构,同学们已知的有什么呢?我想同学们都应该会想到一些东西,比如一些关键字,数字 123,或者 String 字符等等。这里我们从最小的单位,字面值和运行时类型开始讲起。 这里分为语法(Grammer)和运行时(Runtime)两个部分。 有一个设…", "is_english": 0, "is_original": 1, "user_index": 8.408036803013594, "original_type": 0, "original_author": "", "content": "", "ctime": "1601783395", "mtime": "1601785970", "rtime": "1601785970", "draft_id": "6877042786198257672", "view_count": 1743, "collect_count": 15, "digg_count": 21, "comment_count": 0, "hot_index": 108, "is_hot": 0, "rank_index": 0.00093492, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "1873223546578589", "user_name": "三钻", "company": "微信搜:技术银河", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/b167b7a36ca8da3225bf4ffb3257aaa8~300x300.image", "level": 3, "description": "专心、专注、专研,与行业中的同学们一起终生学习", "followee_count": 82, "follower_count": 729, "post_article_count": 51, "digg_article_count": 98, "got_digg_count": 1809, "got_view_count": 98422, "post_shortmsg_count": 8, "digg_shortmsg_count": 7, "isfollowed": false, "favorable_author": 0, "power": 2793, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": 
"0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6879606179924803591, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6941761567046238245", "article_info": {"article_id": "6941761567046238245", "user_id": "3104676565755117", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "", "cover_image": "https://p1-juejin.byteimg.com/tos-cn-i-k3u1fbpfcp/8e5b1513771746a98eb2d0ac97c8c9d5~tplv-k3u1fbpfcp-watermark.image", "is_gfw": 0, "title": "报表可视化搭建平台 - 筛选器联动优化 | 项目复盘", "brief_content": "项目目标: 报表通过可视化搭建的方式,来缩短数据报表开发周期。减少研发同学的依赖,解放生产力。支持 PC 端和移动端展示。 目标用户: BI 分析师、HR 或者效能改进部门。 本身是整个可视化搭建生态中的一员,整个可视化搭建生态底层引擎由单独一个前端小组开发和维护,然后再和业务…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1616254933", "mtime": "1616375334", 
"rtime": "1616293822", "draft_id": "6941647622163660807", "view_count": 623, "collect_count": 7, "digg_count": 17, "comment_count": 1, "hot_index": 49, "is_hot": 0, "rank_index": 0.00093475, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3104676565755117", "user_name": "CAI", "company": "xxx", "job_title": "前端", "avatar_large": "https://sf1-ttcdn-tos.pstatp.com/img/user-avatar/03f5fe02198e30db440a52d278833ede~300x300.image", "level": 2, "description": "", "followee_count": 50, "follower_count": 29, "post_article_count": 8, "digg_article_count": 272, "got_digg_count": 74, "got_view_count": 6126, "post_shortmsg_count": 7, "digg_shortmsg_count": 12, "isfollowed": false, "favorable_author": 0, "power": 135, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6941761567046238245, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": 
false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903513613418503", "article_info": {"article_id": "6844903513613418503", "user_id": "4336129589120072", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://github.com/zuopf769/notebook/blob/master/fe/前端全(无)埋点之页面停留时长统计/README.md", "cover_image": "", "is_gfw": 0, "title": "前端全(无)埋点之页面停留时长统计", "brief_content": "本文讲解了传统的通过beforunload或者unload事件发送页面停留时长的时候丢点的问题;罗列了几种解决问题的思路。", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1511510436", "mtime": "1598438455", "rtime": "1511510436", "draft_id": "0", "view_count": 10999, "collect_count": 77, "digg_count": 153, "comment_count": 8, "hot_index": 710, "is_hot": 0, "rank_index": 0.00093465, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "4336129589120072", "user_name": "zuopf769", "company": "yonyou->baidu->ofo->mtdp", "job_title": "fe", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/mosaic-legacy/3792/5112637127~300x300.image", "level": 2, "description": "前端、node", "followee_count": 334, "follower_count": 1073, "post_article_count": 36, "digg_article_count": 112, "got_digg_count": 2056, "got_view_count": 202537, "post_shortmsg_count": 0, "digg_shortmsg_count": 0, "isfollowed": false, "favorable_author": 0, "power": 832, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, 
"back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903513613418503, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903728990912525", "article_info": {"article_id": "6844903728990912525", "user_id": "3755587449653150", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903728990912525", "cover_image": "", "is_gfw": 0, "title": "你的 JS 代码本可以更加优雅", "brief_content": "有时感觉挺有趣的是在群里聊天时的自嘲,「xx 项目在经过我一年的不断努力下,终于变得不可维护」。个人认为,维护是一件比开发更富挑战性的事情,前人的代码是否规范优雅会很直接地影响我们的工作效率和心情。 所以,我们更要时刻地去注意我们代码的质量,也许你的代码已经足够规范,但在某种程…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1543720517", "mtime": "1598481424", "rtime": "1543722532", "draft_id": "6845075777390657543", "view_count": 5352, "collect_count": 190, "digg_count": 176, "comment_count": 30, "hot_index": 473, "is_hot": 0, "rank_index": 0.00093455, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3755587449653150", "user_name": "淘淘笙悦", "company": "", "job_title": "野生小前端", "avatar_large": 
"https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/699d994be9e4e06d321c36844f33c52a~300x300.image", "level": 3, "description": "", "followee_count": 8, "follower_count": 248, "post_article_count": 21, "digg_article_count": 37, "got_digg_count": 2224, "got_view_count": 98870, "post_shortmsg_count": 1, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 3212, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903728990912525, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903711991414791", "article_info": {"article_id": "6844903711991414791", "user_id": "940837682306830", "category_id": "6809637767543259144", "tag_ids": [6809640357354012685, 6809640398105870343, 6809640407484334093, 
6809640614175604744], "visible_level": 0, "link_url": "https://juejin.im/post/6844903711991414791", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/11/12/167066041df474b9~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "[译] React 的今天和明天 I —— 现状和问题", "brief_content": "早上好。大家好,欢迎来到 React 大会。今天来到这里我感到非常激动。我非常高兴可以给你们做开场演讲。 我是 Sophie Alpert,个人主页是 sophiebits.com。我是 Facebook 的 React 核心小组的开发经理。 你们正在使用的 React 做的很…", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1542000112", "mtime": "1599642818", "rtime": "1542010625", "draft_id": "6845075649419870221", "view_count": 5973, "collect_count": 143, "digg_count": 174, "comment_count": 13, "hot_index": 485, "is_hot": 0, "rank_index": 0.00093454, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "940837682306830", "user_name": "清秋", "company": "公众号:Frontend Radio", "job_title": "Web前端", "avatar_large": "https://sf6-ttcdn-tos.pstatp.com/img/user-avatar/be943b52b5091019bf92eeec817413c9~300x300.image", "level": 3, "description": "你好,我是清秋,一个有着教师梦的 Web 前端非典型程序员。业余画家、设计师、写手,PMP,后端一般,算法还可,数据挖掘背景。北邮硕士毕业后一直在某股份制银行软件开发部工作,一晃已经五年了。", "followee_count": 232, "follower_count": 2944, "post_article_count": 41, "digg_article_count": 227, "got_digg_count": 1820, "got_view_count": 135794, "post_shortmsg_count": 64, "digg_shortmsg_count": 53, "isfollowed": false, "favorable_author": 0, "power": 3177, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": 
"https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546490, "tag_id": "6809640357354012685", "tag_name": "React.js", "color": "#61DAFB", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f655215074250f10f8d4.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234367, "mtime": 1631692935, "id_type": 9, "tag_alias": "", "post_article_count": 16999, "concern_user_count": 226420}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}, {"id": 2546526, "tag_id": "6809640407484334093", "tag_name": "前端", "color": "#60ADFF", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/bac28828a49181c34110.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 1, "ctime": 1435971546, "mtime": 1631693180, "id_type": 9, "tag_alias": "", "post_article_count": 88830, "concern_user_count": 527705}, {"id": 2546676, "tag_id": "6809640614175604744", "tag_name": "掘金翻译计划", "color": "#0081ff", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/95f7e8be776556ab8d82.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1454716787, "mtime": 1631689800, "id_type": 9, "tag_alias": "", "post_article_count": 2502, "concern_user_count": 42848}], "user_interact": {"id": 6844903711991414791, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": 
false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6854573221983813645", "article_info": {"article_id": "6854573221983813645", "user_id": "2594503169948727", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6854573221983813645", "cover_image": "", "is_gfw": 0, "title": "javascript 事件流和事件委托", "brief_content": "javascript事件流和事件委托DOM事件流事件对象👉event:MDN传送门事件类型事件委托👉彻底弄懂JS事件委托的概念和作用相关文章推荐:👉EventLoop事件循环机制(浏览器)👉javas", "is_english": 0, "is_original": 1, "user_index": 2.7992049380886, "original_type": 0, "original_author": "", "content": "", "ctime": "1595923488", "mtime": "1599102471", "rtime": "1595927245", "draft_id": "6854812682176954382", "view_count": 2445, "collect_count": 27, "digg_count": 19, "comment_count": 3, "hot_index": 144, "is_hot": 0, "rank_index": 0.00093428, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2594503169948727", "user_name": "zlevai", "company": "", "job_title": "@前端", "avatar_large": "https://sf3-ttcdn-tos.pstatp.com/img/user-avatar/ab2aa610532cdb960c465521032c3b27~300x300.image", "level": 2, "description": "知新温故事", "followee_count": 91, "follower_count": 22, "post_article_count": 40, "digg_article_count": 415, "got_digg_count": 102, "got_view_count": 20776, "post_shortmsg_count": 1, "digg_shortmsg_count": 12, "isfollowed": false, "favorable_author": 0, "power": 309, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", 
"icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6854573221983813645, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903619582509063", "article_info": {"article_id": "6844903619582509063", "user_id": "3051900006063838", "category_id": "6809637767543259144", "tag_ids": [6809640361531539470, 6809640398105870343], "visible_level": 0, "link_url": "https://bailinlin.github.io/2018/06/08/node-notes/", "cover_image": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/6/12/163f27e92cc8d28b~tplv-t2oaga2asx-image.image", "is_gfw": 0, "title": "精读《深入浅出Node.js》", "brief_content": "从不同的视角介绍了 Node 内在的特点和结构。由首章Node 介绍为索引,涉及Node 的各个方面,主要内容包含模块机制的揭示、异步I/O 实现原理的展现、异步编程的探讨、内存控制的介绍、二进制数据Buffer 的细节、Node 中的网络编程...", "is_english": 0, "is_original": 0, "user_index": 0, "original_type": 1, "original_author": "", "content": "", "ctime": "1528781772", "mtime": "1598456123", "rtime": "1528783263", "draft_id": "0", "view_count": 4960, "collect_count": 160, "digg_count": 310, "comment_count": 22, "hot_index": 580, "is_hot": 0, "rank_index": 0.00093416, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "3051900006063838", "user_name": "程序员解决师", "company": "CBU首席程序员解决师", "job_title": "程序员解决师", 
"avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2019/2/1/168a798dd33ef955~tplv-t2oaga2asx-image.image", "level": 3, "description": "", "followee_count": 26, "follower_count": 5208, "post_article_count": 64, "digg_article_count": 506, "got_digg_count": 6328, "got_view_count": 189658, "post_shortmsg_count": 58, "digg_shortmsg_count": 3, "isfollowed": false, "favorable_author": 0, "power": 4534, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": "6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546492, "tag_id": "6809640361531539470", "tag_name": "Node.js", "color": "#e81864", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/f16f548d25028a1fdd80.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1432234488, "mtime": 1631690352, "id_type": 9, "tag_alias": "", "post_article_count": 11514, "concern_user_count": 280711}, {"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903619582509063, "omitempty": 2, 
"user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}, {"article_id": "6844903728328212488", "article_info": {"article_id": "6844903728328212488", "user_id": "2770425030912094", "category_id": "6809637767543259144", "tag_ids": [6809640398105870343], "visible_level": 0, "link_url": "https://juejin.im/post/6844903728328212488", "cover_image": "", "is_gfw": 0, "title": "JS函数节流和函数防抖", "brief_content": "1.为什么需要函数防抖和函数节流?2.什么是函数防抖和函数节流2.1函数防抖(debounce)2.2函数节流(throttle)3.应用场景类型场景函数防抖1.手机号、邮箱输入检测2.搜索框搜索输入", "is_english": 0, "is_original": 1, "user_index": 0, "original_type": 0, "original_author": "", "content": "", "ctime": "1543567750", "mtime": "1598481234", "rtime": "1543569899", "draft_id": "6845075776610517005", "view_count": 7007, "collect_count": 107, "digg_count": 110, "comment_count": 14, "hot_index": 474, "is_hot": 0, "rank_index": 0.00093399, "status": 2, "verify_status": 1, "audit_status": 2, "mark_content": ""}, "author_user_info": {"user_id": "2770425030912094", "user_name": "JarvisJie", "company": "", "job_title": "研发新菜", "avatar_large": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/gold-user-assets/2018/7/30/164ea7735e0f7d92~tplv-t2oaga2asx-image.image", "level": 2, "description": "Less is more.", "followee_count": 23, "follower_count": 26, "post_article_count": 8, "digg_article_count": 169, "got_digg_count": 290, "got_view_count": 21473, "post_shortmsg_count": 2, "digg_shortmsg_count": 4, "isfollowed": false, "favorable_author": 0, "power": 504, "study_point": 0, "university": {"university_id": "0", "name": "", "logo": ""}, "major": {"major_id": "0", "parent_id": "0", "name": ""}, "student_status": 0, "select_event_count": 0, "select_online_course_count": 0, "identity": 0, "is_select_annual": false, "select_annual_rank": 0, "annual_list_type": 0, "extraMap": {}, "is_logout": 0}, "category": {"category_id": 
"6809637767543259144", "category_name": "前端", "category_url": "frontend", "rank": 2, "back_ground": "https://lc-mhke0kuv.cn-n1.lcfile.com/8c95587526f346c0.png", "icon": "https://lc-mhke0kuv.cn-n1.lcfile.com/1c40f5eaba561e32.png", "ctime": 1457483942, "mtime": 1432503190, "show_type": 3, "item_type": 2, "promote_tag_cap": 4, "promote_priority": 2}, "tags": [{"id": 2546519, "tag_id": "6809640398105870343", "tag_name": "JavaScript", "color": "#616161", "icon": "https://p1-jj.byteimg.com/tos-cn-i-t2oaga2asx/leancloud-assets/5d70fd6af940df373834.png~tplv-t2oaga2asx-image.image", "back_ground": "", "show_navi": 0, "ctime": 1435884803, "mtime": 1631693185, "id_type": 9, "tag_alias": "", "post_article_count": 67405, "concern_user_count": 398957}], "user_interact": {"id": 6844903728328212488, "omitempty": 2, "user_id": 0, "is_digg": false, "is_follow": false, "is_collect": false}, "org": {"org_info": null, "org_user": null, "is_followed": false}, "req_id": "202109151606430102020911002C000E98"}], "cursor": "eyJ2IjoiNzAwNzYxNTY2NjYwOTk3OTQwMCIsImkiOjc0ODB9", "count": 22117, "has_more": true} | [
"[email protected]"
] | |
f41bb0f627ed6d8f5fd7b2f6953ef836320c19d9 | 9b68695d6d7d05bdfdcb087db532d66188cfbcdb | /bsmsm/spiders/spider.py | 67165b7f5f3a4693d22e7d719589e6d28ffc76e2 | [] | no_license | hristo-grudev/bsmsm | 1f100180535b564cd8ca59fd62b35de4cf25b460 | e7035250b07e21e25299967eee065ea588369857 | refs/heads/main | 2023-03-13T13:13:48.075506 | 2021-03-05T08:32:08 | 2021-03-05T08:32:08 | 344,745,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 985 | py | import scrapy
from scrapy.loader import ItemLoader
from ..items import BsmsmItem
from itemloaders.processors import TakeFirst
class BsmsmSpider(scrapy.Spider):
name = 'bsmsm'
start_urls = ['https://www.bsm.sm/it/news-bsm.php']
def parse(self, response):
post_links = response.xpath('//div[@class="titolo-news bold"]/a/@href').getall()
yield from response.follow_all(post_links, self.parse_post)
def parse_post(self, response):
title = response.xpath('//h1//text()').get()
description = response.xpath('//span[@itemprop="description"]//text()[normalize-space()]').getall()
description = [p.strip() for p in description]
description = ' '.join(description).strip()
date = response.xpath('//div[@class="bold"]/text()').get()
item = ItemLoader(item=BsmsmItem(), response=response)
item.default_output_processor = TakeFirst()
item.add_value('title', title)
item.add_value('description', description)
item.add_value('date', date)
return item.load_item()
| [
"[email protected]"
] | |
dfcc76777b82ff4e0e00f8715bf8f234f7907333 | 7a527060afabd2e0867d5dcf4b75592b43ef5005 | /Leetcode/简单+剑指offer题/面试题49. 丑数.py | d4544c4e9bfbe5eb88767af5250c4fe4854899b0 | [] | no_license | Stevenzzz1996/MLLCV | ff01a276cf40142c1b28612cb5b43e563ad3a24a | 314953b759212db5ad07dcb18854bf6d120ba172 | refs/heads/master | 2023-02-10T18:11:30.399042 | 2021-01-05T12:05:21 | 2021-01-05T12:05:21 | 267,804,954 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | #!usr/bin/env python
# -*- coding:utf-8 -*-
# author: sfhong2020 time:2020/4/1 20:38
# 堆/动态规划
| [
"[email protected]"
] | |
1e8ef4de1607e1cc8d39eb411fda21d27e17dbb7 | ccbfc7818c0b75929a1dfae41dc061d5e0b78519 | /aliyun-openapi-python-sdk-master/aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/SwitchTempInstanceRequest.py | fc14d771d9b5d1b07ba06e98344c8a776066f9dd | [
"Apache-2.0"
] | permissive | P79N6A/dysms_python | 44b634ffb2856b81d5f79f65889bfd5232a9b546 | f44877b35817e103eed469a637813efffa1be3e4 | refs/heads/master | 2020-04-28T15:25:00.368913 | 2019-03-13T07:52:34 | 2019-03-13T07:52:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,737 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SwitchTempInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'SwitchTempInstance','redisa')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId) | [
"[email protected]"
] | |
5c77958a70db3fdb38303d8bf678113803c62984 | d57b51ec207002e333b8655a8f5832ed143aa28c | /.history/gos_20200614055443.py | e65771e72edd5ad03a64cf83c5b0bcf4ef404048 | [] | no_license | yevheniir/python_course_2020 | b42766c4278a08b8b79fec77e036a1b987accf51 | a152d400ab4f45d9d98d8ad8b2560d6f0b408c0b | refs/heads/master | 2022-11-15T07:13:24.193173 | 2020-07-11T15:43:26 | 2020-07-11T15:43:26 | 278,890,802 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,414 | py | # # Імпорт фажливих бібліотек
# from BeautifulSoup import BeautifulSoup
# import urllib2
# import re
# # Створення функції пошуку силок
# def getLinks(url):
# # отримання та присвоєння контенту сторінки в змінну
# html_page = urllib2.urlopen(url)
# # Перетворення контенту в обєкт бібліотеки BeautifulSoup
# soup = BeautifulSoup(html_page)
# # створення пустого масиву для лінків
# links = []
# # ЗА ДОПОМОГОЮ ЧИКЛУ ПРОХЛДИМСЯ ПО ВСІХ ЕЛЕМЕНТАХ ДЕ Є СИЛКА
# for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
# # Додаємо всі силки в список
# links.append(link.get('href'))
# # повертаємо список
# return links
# -----------------------------------------------------------------------------------------------------------
# # # Імпорт фажливих бібліотек
# import subprocess
# # Створення циклу та використання функції range для генерації послідовних чисел
# for ping in range(1,10):
# # генерування IP адреси базуючись на номері ітерації
# address = "127.0.0." + str(ping)
# # виклик функції call яка робить запит на IP адрес та запис відповіді в змінну
# res = subprocess.call(['ping', '-c', '3', address])
# # За допомогою умовних операторів перевіряємо відповідь та виводимо результат
# if res == 0:
# print "ping to", address, "OK"
# elif res == 2:
# print "no response from", address
# else:
# print "ping to", address, "failed!"
# -----------------------------------------------------------------------------------------------------------
# Імпорт фажливих бібліотек
import requests
for pic_
with open('pic1.jpg', 'wb') as handle:
response = requests.get(pic_url, stream=True)
if not response.ok:
print(response)
for block in response.iter_content(1024):
if not block:
break
handle.write(block) | [
"[email protected]"
] | |
abe78bc49b85c74a1b2f4932b3ed2e0bab37eb16 | ffa21e4415ead5106f7f846bc24b0d308ace90b5 | /swagger_client/models/forecast_transaction.py | be10b2bc4700700721d9092cecf9dddd1c89aefa | [] | no_license | steini58/swagger-client | fa7b6f077e5a1b01e42c4420b214b19e1d364e4e | e5fd7bf28f8529746e18bdd799c86ad78310ffd5 | refs/heads/master | 2020-03-29T09:14:26.644065 | 2018-09-20T13:29:14 | 2018-09-20T13:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,665 | py | # coding: utf-8
"""
[AHOI cookbook](/ahoi/docs/cookbook/index.html) [Data Privacy](/sandboxmanager/#/privacy) [Terms of Service](/sandboxmanager/#/terms) [Imprint](https://sparkassen-hub.com/impressum/) © 2016‐2017 Starfinanz - Ein Unternehmen der Finanz Informatik # noqa: E501
OpenAPI spec version: 2.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from swagger_client.models.balance import Balance # noqa: F401,E501
from swagger_client.models.forecast import Forecast # noqa: F401,E501
from swagger_client.models.transaction import Transaction # noqa: F401,E501
class ForecastTransaction(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'forecast_balance': 'Balance',
'account_id': 'str',
'transactions': 'list[Transaction]'
}
attribute_map = {
'forecast_balance': 'forecastBalance',
'account_id': 'accountId',
'transactions': 'transactions'
}
def __init__(self, forecast_balance=None, account_id=None, transactions=None): # noqa: E501
"""ForecastTransaction - a model defined in Swagger""" # noqa: E501
self._forecast_balance = None
self._account_id = None
self._transactions = None
self.discriminator = None
self.forecast_balance = forecast_balance
self.account_id = account_id
self.transactions = transactions
@property
def forecast_balance(self):
"""Gets the forecast_balance of this ForecastTransaction. # noqa: E501
Balance forecast # noqa: E501
:return: The forecast_balance of this ForecastTransaction. # noqa: E501
:rtype: Balance
"""
return self._forecast_balance
@forecast_balance.setter
def forecast_balance(self, forecast_balance):
"""Sets the forecast_balance of this ForecastTransaction.
Balance forecast # noqa: E501
:param forecast_balance: The forecast_balance of this ForecastTransaction. # noqa: E501
:type: Balance
"""
if forecast_balance is None:
raise ValueError("Invalid value for `forecast_balance`, must not be `None`") # noqa: E501
self._forecast_balance = forecast_balance
@property
def account_id(self):
"""Gets the account_id of this ForecastTransaction. # noqa: E501
Id of account this entry belongs to # noqa: E501
:return: The account_id of this ForecastTransaction. # noqa: E501
:rtype: str
"""
return self._account_id
@account_id.setter
def account_id(self, account_id):
"""Sets the account_id of this ForecastTransaction.
Id of account this entry belongs to # noqa: E501
:param account_id: The account_id of this ForecastTransaction. # noqa: E501
:type: str
"""
if account_id is None:
raise ValueError("Invalid value for `account_id`, must not be `None`") # noqa: E501
self._account_id = account_id
@property
def transactions(self):
"""Gets the transactions of this ForecastTransaction. # noqa: E501
List of unappliedTransaction # noqa: E501
:return: The transactions of this ForecastTransaction. # noqa: E501
:rtype: list[Transaction]
"""
return self._transactions
@transactions.setter
def transactions(self, transactions):
"""Sets the transactions of this ForecastTransaction.
List of unappliedTransaction # noqa: E501
:param transactions: The transactions of this ForecastTransaction. # noqa: E501
:type: list[Transaction]
"""
if transactions is None:
raise ValueError("Invalid value for `transactions`, must not be `None`") # noqa: E501
self._transactions = transactions
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ForecastTransaction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
55d5457523106be301584f485d2044be5c180be7 | ed32eb1eb0a328a4ffe89e178fc4987470f333cd | /exercise/day1-4/compute.py | 7e64771516f775b9bee62dbd5f5d8fe460b8b9c5 | [] | no_license | xiaoyaojjian/py_learn | c6f5bdf31bcebf29dd914e81e6be9305a61265cc | 95e494ea823d2074a05c1c2a49595002a1576093 | refs/heads/master | 2020-12-05T23:22:11.017066 | 2016-09-08T01:13:08 | 2016-09-08T01:13:08 | 67,654,055 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | """
计算器, 用了eval() 没有任何意义了, 四则运算应该单独写一个函数处理
"""
import re
a = '1 - 2 * ( (60-30 +(-40/5) * (9-2*5/3 + 7 /3*99/4*2998 +10 * 568/14 )) - (-4*3)/ (16-3*2) )'
print(eval(a))
def get_brackets_data(formula):
return re.findall('\(([^()]+)\)', formula)
while re.search('[()]', a):
for i in get_brackets_data(a):
a = a.replace('(%s)' % i, str(eval(i)))
print(a)
print(eval(a)) | [
"[email protected]"
] | |
4577eaed8369402971817fc693acae6518a09f80 | bd81142f05e57b637cc0ddd63edbc3c6b5b4a0a2 | /knowledge-driven-dialogue/generative_pt/tools/conversation_client.py | 598d22bca00ebba8dd12eac1105b2e8df08d391f | [
"MIT"
] | permissive | Chriszhangmw/ChatBots | 876d751f30d1d8ea759440fe1e7d4beb6ef94087 | 0735918e326bd6ff20b70388ae199ec11d9cbc11 | refs/heads/master | 2021-12-14T04:10:53.452552 | 2021-11-28T12:23:10 | 2021-11-28T12:23:10 | 210,681,028 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,061 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# Copyright (c) 2019 Baidu.com, Inc. All Rights Reserved
#
################################################################################
"""
File: conversation_client.py
"""
import sys
import socket
SERVER_IP = "127.0.0.1"
SERVER_PORT = 8601
def conversation_client(text):
"""
conversation_client
"""
mysocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysocket.connect((SERVER_IP, SERVER_PORT))
mysocket.sendall(text.encode())
result = mysocket.recv(4096).decode()
mysocket.close()
return result
def main():
"""
main
"""
if len(sys.argv) < 2:
print("Usage: " + sys.argv[0] + " eval_file")
exit()
for line in open(sys.argv[1]):
response = conversation_client(line.strip())
print(response)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print("\nExited from the program ealier!")
| [
"[email protected]"
] | |
cd7536fbdfbd4277136ae6edaee967cd1d86ab18 | 60618d48e09a140926d97b01cb9b6f76fcc65703 | /data analysis/itheima/plot.py | 6a18b4bded99c6dbf7247578045daa0392a6d27a | [] | no_license | Incipe-win/Python | ca8f36cc8785eb13512f71a3cf10149d4e1b855e | 5bab36b90591c74dedb6ead3484a279b90a1bcbd | refs/heads/master | 2021-01-07T08:11:42.293541 | 2020-12-06T09:17:02 | 2020-12-06T09:17:02 | 241,629,236 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,102 | py | from matplotlib import pyplot as plt
import random
from matplotlib import font_manager
my_font = font_manager.FontProperties(
fname="/usr/share/fonts/opentype/noto/NotoSansCJK-Bold.ttc")
# import matplotlib
#
# font = {"family": "Noto Sans Mono",
# "weight": "bold",
# "size": "larger"
# }
# matplotlib.rc("font", **font)
# x = range(2, 26, 2)
# y = [15, 13, 14.5, 17, 20, 25, 26, 26, 24, 22, 18, 15]
#
# plt.figure(num="hh", figsize=(20, 8), dpi=80)
# plt.tick_params(axis='x', colors="green")
# x_ticks = [i/2 for i in range(4, 49)]
# x_labels = ["h" + str(i) for i in range(1, 14)]
# plt.xticks(x_ticks[::3], x_labels)
# plt.yticks(range(min(y), max(y) + 1))
#
# plt.plot(x, y)
# plt.savefig("./test.svg")
# plt.show()
# y = [random.randint(20, 35) for i in range(120)]
# cnt = 10
# x = []
# for i in range(120):
# if i == 60:
# cnt += 1
# i %= 60
# s = str(i) if i >= 10 else "0" + str(i)
# x.append(str(cnt) + ":" + s)
# plt.figure(figsize=(100, 15), dpi=80)
# plt.tick_params(axis='both', colors="green")
# plt.xticks(list(range(120))[::3], labels=x[::3], rotation=45,
# fontproperties=my_font)
# plt.yticks(range(19, 36))
# plt.xlabel("时间", fontproperties=my_font)
# plt.ylabel("温度 单位(摄氏度)", fontproperties=my_font)
# plt.title("10~12点每分钟气温变化情况", fontproperties=my_font)
# plt.plot(x, y)
# plt.show()
y1 = [1, 0, 1, 1, 2, 4, 3, 2, 3, 4, 4, 5, 6, 5, 4, 3, 3, 1, 1, 1]
y2 = [1, 0, 3, 1, 2, 2, 3, 3, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1]
x = ["{}岁".format(i) for i in range(11, 31)]
plt.figure(figsize=(20, 15), dpi=80)
plt.tick_params(axis="both", colors="green")
plt.xticks(list(range(20)), labels=x, rotation=45, fontproperties=my_font)
plt.yticks(range(min(y1), max(y1)))
plt.xlabel("xx", fontproperties=my_font)
plt.ylabel("xxxx 单位(个)", fontproperties=my_font)
plt.title("xx~xxxx", fontproperties=my_font)
plt.grid(alpha=0.4)
plt.plot(x, y1, color="green", label="xx")
plt.plot(x, y2, color="blue", label="xx")
plt.legend(prop=my_font)
# plt.show()
plt.savefig("./plot.svg")
| [
"[email protected]"
] | |
40d6ad7c3c49e310f10e435aee22d2aa9b19a03c | 68eb441faf3f9415fbcbc8330f9b01ad6933bede | /ebook/machinelearningdemo/MachineLearningLessonPro/ML_1/3.0loadfile_fromdata.py | 1711ef93a3ae8eea6d78e080a3ca39a2781775f4 | [] | no_license | OrriO/jupyter_myworkspace | fb8e97865f15abe2fb3aa01985fdb4f34317f15f | a592ab92f38a1cd466c454bb36fd0002c75202a9 | refs/heads/master | 2023-06-01T02:00:36.986439 | 2021-07-08T13:44:26 | 2021-07-08T13:44:26 | 381,997,768 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 784 | py | #-*- coding: utf-8 -*-
# @Time : 2018/12/6 10:08
# @Author : Z
# @Email : S
# @File : 3.0loadfile_fromdata.py
from surprise import BaselineOnly
from surprise import Dataset
from surprise import Reader
from surprise.model_selection import cross_validate
import os
# path to dataset file
file_path = os.path.expanduser('./u.data')
# As we're loading a custom dataset, we need to define a reader. In the
# movielens-100k dataset, each line has the following format:
# 'user item rating timestamp', separated by '\t' characters.
reader = Reader(line_format='user item rating timestamp', sep='\t')
data = Dataset.load_from_file(file_path, reader=reader)
# We can now use this dataset as we please, e.g. calling cross_validate
cross_validate(BaselineOnly(), data, verbose=True) | [
"[email protected]"
] | |
ed6019a55cbe49b15e4cbe71343c9ea879f3e984 | bd14c979335112b7718b0feda18ebf0e3b40fe5c | /arihon_biginners/review_of_binsearch.py | 3ae4f4958eb3c9ebc42c27e83fb5e6cc36c26f9e | [] | no_license | ababa831/atcoder_beginners | 22c57b15333d110126d1b1afadc0ff5e8784fc4f | 1a30882ce7f20f312045d5dc7bfaa5688cc8a88e | refs/heads/master | 2023-03-07T15:47:19.750682 | 2020-03-04T19:53:45 | 2020-03-04T19:53:45 | 143,360,607 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Review of a simple bit search algorithm
D = 3
print('Number of digit', D)
combinations = []
for i in range(1 << D):
flaged = []
for j in range(D):
if (i >> j) & 1:
flaged.append(j + 1)
print('Binary {} has flags at digit {}'.format(bin(i), flaged))
combinations.append(flaged)
print('Total number of combinations ', len(combinations))
print('Combinations: ', combinations)
| [
"[email protected]"
] | |
195b5b5bf3d61d63758c2c4cdb7d1942a70e832d | 3f5d531abcf69bc9f7de317ce46d45786272013d | /src/config/test/test_default.py | 7711ddcd42e45b5fc7232a940a9bceb55d370e5a | [
"MIT"
] | permissive | thak123/i-tagger | 61a8880e250069fc40c0a616e718a739bd27cb58 | dd8502947011e95b72b243fad9aad094b9a7d15c | refs/heads/master | 2021-05-14T16:51:20.799677 | 2018-01-02T12:09:36 | 2018-01-02T12:09:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 477 | py | import unittest
from helpers.config_helper import *
from config.default import create_default_config
class TestDefault(unittest.TestCase):
def test_creation(self):
config_helper = ConfigManager("/tmp/config.ini")
if not os.path.exists("/tmp/config.ini"):
create_default_config(config_helper)
self.assertEqual(os.path.exists("/tmp/config.ini"), True)
self.assertEqual(config_helper.get_item("Schema", "text_column"), "word")
| [
"[email protected]"
] | |
17af632bafeab7fe05ec6df418b301f86f74b0cb | 582df95fc9b1d00e6c75321ad6a7894e0722245e | /tests/test_download_model.py | 34baadec66352e161086017e45cd3ea66aadfa94 | [
"Apache-2.0"
] | permissive | viniarck/podcaststore-django | 2c4db217126e3dbdf1244bb22ae1aea0cd502874 | 90316ffb18793b089291a0e28ac3ee2bb5e458cb | refs/heads/master | 2020-06-29T13:16:18.449358 | 2019-12-05T11:44:34 | 2019-12-05T11:44:34 | 200,547,759 | 0 | 0 | Apache-2.0 | 2020-06-05T22:33:18 | 2019-08-04T22:29:38 | Python | UTF-8 | Python | false | false | 828 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pytest
from podcaststore_api.models.download import Download, DownloadSerializer
class TestDownloadModel:
"""TestDownloadModel."""
@pytest.mark.django_db
def test_repr(self, create_download: Download) -> None:
"""Test __repr__ method."""
download = create_download
assert (
repr(download)
== f"Download({download.id}, {download.track_id}, {download.date})"
)
class TestTagSerializer:
"""TestTagSerializer"""
@pytest.mark.django_db
def test_ser_data(self, create_download: Download) -> None:
"""Test serialization data."""
download_serd = DownloadSerializer(create_download)
for field in ("id", "track_id", "date"):
assert field in download_serd.data
| [
"[email protected]"
] | |
14738cb6e89be7fa2dc18c2e0d95c3d9f63fcf63 | af4c325a5a20cb054f22723af9a693cdf8cda3e5 | /mysite/env/lib/python3.7/encodings/euc_kr.py | eadb6c4e18a9f3978efd0efea1024e9adac530cb | [] | no_license | myracheng/treehacks19 | aff1d6356f480dfdc4ca75f286fbcbd29c110a35 | 9e2d9195f749415eddcfabaceed0f9d911b12c7e | refs/heads/master | 2020-07-04T11:07:02.833157 | 2019-02-17T19:24:08 | 2019-02-17T19:24:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py | /Users/gracelu/anaconda3/lib/python3.7/encodings/euc_kr.py | [
"[email protected]"
] | |
a161c21ea948b07a05375c924672731065a639c1 | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/tests/test_models/test_dense_heads/test_yolact_head.py | 11b74a3b9a7c7d2bae8547cf62e2ad4fdb73cec3 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference",
"GPL-1.0-or-later"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 5,894 | py |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
def test_yolact_head_loss():
"""Tests yolact head losses when truth is empty and non-empty."""
s = 550
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False,
min_gt_box_wh=[4.0, 4.0]))
bbox_head = YOLACTHead(
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=3,
scales_per_octave=1,
base_sizes=[8, 16, 32, 64, 128],
ratios=[0.5, 1.0, 2.0],
strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
centers=[(550 * 0.5 / x, 550 * 0.5 / x)
for x in [69, 35, 18, 9, 5]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
reduction='none',
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
num_head_convs=1,
num_protos=32,
use_ohem=True,
train_cfg=train_cfg)
segm_head = YOLACTSegmHead(
in_channels=256,
num_classes=80,
loss_segm=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
mask_head = YOLACTProtonet(
num_classes=80,
in_channels=256,
num_protos=32,
max_masks_to_train=100,
loss_mask_weight=6.125)
feat = [
torch.rand(1, 256, feat_size, feat_size)
for feat_size in [69, 35, 18, 9, 5]
]
cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# Test segm head and mask head
segm_head_outs = segm_head(feat[0])
empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
sampling_results)
# When there is no truth, the segm and mask loss should be zero.
empty_segm_loss = sum(empty_segm_loss['loss_segm'])
empty_mask_loss = sum(empty_mask_loss['loss_mask'])
assert empty_segm_loss.item() == 0, (
'there should be no segm loss when there are no true boxes')
assert empty_mask_loss == 0, (
'there should be no mask loss when there are no true boxes')
# When truth is non-empty then cls, box, mask, segm loss should be
# nonzero for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
one_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'
one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'
| [
"[email protected]"
] | |
78a9d21635b3b2f9d4282deb74507c8b86a89628 | ea2015881c18583a4295122f2e2c1d2dbd3e32f9 | /_pipeline_scripts/script_6.4.3_ps_prot_pairDave.py | 6b12ef0ebf30ba4e369c2c941843af7dcdf42b21 | [] | no_license | panchyni/PseudogenePipeline | ad0b210d943bfdc83da1eeb63c0d7dec2a8719ae | 44a5bfd034dfd9b21808b6e6c5b789f141912c33 | refs/heads/master | 2021-01-11T15:54:57.514872 | 2017-04-17T21:13:16 | 2017-04-17T21:13:16 | 79,955,253 | 4 | 2 | null | null | null | null | UTF-8 | Python | false | false | 651 | py |
import sys
print "Read protein sequence file..."
inp = open(sys.argv[1])
inl = inp.readlines()
p = {}
for i in inl:
if i[0] == ">":
g = i[1:-1].split(".")
if g[0] not in p:
p[g[0]] = [g[1]]
else:
p[g[0]].append(g[1])
print "Read pair file..."
inp = open(sys.argv[2]) # osv5_ps_gene.pairs
oup = open("osv5_ps_prot.pairs","w")
inl = inp.readlines()
miss = []
for i in inl:
L = i[:-1].split("\t")
if L[1] in p:
for j in p[L[1]]:
oup.write("%s\t%s.%s\n" % (L[0],L[1],j))
else:
if L[1] not in miss:
miss.append(L[1])
print "The following genes are not in the prot seq file:"
for i in miss:
print "",i
print "Done!"
| [
"panchyni.msu.edu"
] | panchyni.msu.edu |
a07caa95edb7398b9588e8dbf134ba5d00978be0 | 977073b97242b8bf48b49e145395d8d948890924 | /experiments/run_submission.py | 0aaa1722561252ba0e1393e56e7ad046f830a6f5 | [] | no_license | upura/booking-challenge-2021 | c80e88f8545ae1b5b8e3d9da3bac49f3ea982ee5 | 7b6daa2fabd28773cc452cd6605861372ea64d78 | refs/heads/master | 2023-03-03T16:22:45.258906 | 2021-02-17T20:36:06 | 2021-02-17T20:36:06 | 325,207,679 | 10 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,768 | py | import gc
import numpy as np
import pandas as pd
from sklearn import preprocessing
import torch
from torch.utils.data import DataLoader
from tqdm import tqdm
from src.datasets import load_train_test, BookingDataset
from src.models import BookingNN
from src.utils import seed_everything
from src.runner import CustomRunner
if __name__ == '__main__':
    # Fix all RNG seeds so the generated submission is reproducible.
    seed_everything(0)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print(device)

    # Categorical features fed to the model's embedding layers; the
    # commented-out columns were apparently tried and dropped.
    categorical_cols = [
        'user_id',
        # 'device_class',
        # 'affiliate_id',
        'booker_country',
        # 'hotel_country'
    ]

    train_test = load_train_test()
    # Embedding sizes per categorical column: (cardinality, min(50, (c + 1) // 2)).
    cat_dims = [int(train_test[col].nunique()) for col in categorical_cols]
    emb_dims = [(x, min(50, (x + 1) // 2)) for x in cat_dims]

    # Label-encode the prediction target (city_id); kept so predictions can be
    # mapped back with inverse_transform below.
    target_le = preprocessing.LabelEncoder()
    train_test['city_id'] = target_le.fit_transform(train_test['city_id'])

    # Label-encode every categorical column (missing values become 'unk').
    for c in categorical_cols:
        le = preprocessing.LabelEncoder()
        train_test[c] = le.fit_transform(train_test[c].astype(str).fillna('unk').values)

    # Test rows are the ones carrying a non-null row_num.
    test = train_test[~train_test['row_num'].isnull()]
    # Collapse consecutive duplicate cities per trip and drop the padding
    # city id 0, keeping the visited-city sequence for each utrip_id.
    test_trips = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')['city_id'].apply(lambda x: x.values).reset_index()
    # One row per trip: last categorical values plus the city sequence.
    X_test = test[test['city_id'] != test['city_id'].shift(1)].query('city_id!=0').groupby('utrip_id')[categorical_cols].last().reset_index()
    X_test['city_id'] = test_trips['city_id']
    X_test = X_test.reset_index(drop=True)

    # batch_size=1 because trips have variable sequence lengths.
    test_dataset = BookingDataset(X_test, is_train=False)
    test_loader = DataLoader(test_dataset,
                             shuffle=False,
                             batch_size=1)

    # Free the large intermediate frames before inference.
    del train_test, test, test_trips
    gc.collect()

    # Checkpoint directories of the trained models to run.
    model_paths = [
        '../input/booking-bi-lstm-ep1/logdir_nn000',
    ]

    for mp in model_paths:
        for fold_id in (0,):
            runner = CustomRunner(device=device)
            model = BookingNN(len(target_le.classes_))
            pred = []
            # For each trip take the 4 highest-scoring cities from the last
            # sequence step, mapped back to the original city ids.
            for prediction in tqdm(runner.predict_loader(loader=test_loader,
                                                         resume=f'{mp}/fold{fold_id}/checkpoints/best.pth',
                                                         model=model,)):
                pred.append(target_le.inverse_transform(np.argsort(prediction.cpu().numpy()[-1, :])[-4:]))
            pred = np.array(pred)
            # Persist raw fold predictions for later ensembling/inspection.
            np.save(f"y_pred{mp.replace('/', '_').replace('.', '')}_fold{fold_id}", pred)

    # Booking challenge submission format: utrip_id plus 4 candidate cities.
    submission = pd.concat([
        X_test['utrip_id'],
        pd.DataFrame(pred, columns=['city_id_1', 'city_id_2', 'city_id_3', 'city_id_4'])
    ], axis=1)
    print(submission.head())
    submission.to_csv('submission.csv', index=False)
| [
"[email protected]"
] | |
263f9d74b0c56b54ae61b705fc78e35537aa37aa | 1bdf38834c22b0100595cb22f2862fd1ba0bc1e7 | /code394DecodeString.py | 6498e6f8c2f6d46d2cadc4e51089b069f52ef7bd | [] | no_license | cybelewang/leetcode-python | 48d91c728856ff577f1ccba5a5340485414d6c6e | 635af6e22aa8eef8e7920a585d43a45a891a8157 | refs/heads/master | 2023-01-04T11:28:19.757123 | 2020-10-29T05:55:35 | 2020-10-29T05:55:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,120 | py | """
394 Decode String
Given an encoded string, return it's decoded string.
The encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.
You may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.
Furthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].
Examples:
s = "3[a]2[bc]", return "aaabcbc".
s = "3[a2[c]]", return "accaccacc".
s = "2[abc]3[cd]ef", return "abcabccdcdcdef".
"""
class Solution:
    """Decoders for the ``k[substring]`` string encoding (LeetCode 394)."""

    # OJ's best
    def decodeString(self, s):
        """Iterative decode with an explicit stack of (string, count) frames."""
        frames = []
        count = 0
        current = ''
        for ch in s:
            if ch == '[':
                frames.append(current)
                frames.append(count)
                current, count = '', 0
            elif ch == ']':
                repeat = frames.pop()
                current = frames.pop() + repeat * current
            elif ch.isdigit():
                count = 10 * count + int(ch)
            else:
                current += ch
        return current

    # my solution
    def decodeString2(self, s):
        """Stack-of-partial-results variant.

        :type s: str
        :rtype: str
        """
        parts = ['']
        value = 0
        for ch in s:
            if ch.isdigit():
                value = value * 10 + (ord(ch) - ord('0'))
            elif ch == '[':
                parts.extend([value, ''])
                value = 0
            elif ch == ']':
                segment = parts.pop()
                times = parts.pop()
                parts[-1] += segment * times
                value = 0
            else:
                parts[-1] += ch
                value = 0
        return parts[-1]
# Smoke-test the decoder against the problem's sample inputs.
solver = Solution()
samples = ['', 'abcde', '3[a]2[bc]', '3[a2[c]]', '2[abc]3[cd]ef']
for sample in samples:
    print(solver.decodeString(sample))
"[email protected]"
] | |
8202094a23d76e36f8d0a8c3817a8c188a349efa | 318013ccb8738ace0ec72965dac0a3e3fe2fecad | /venv/lib/python3.7/site-packages/thumbor/engines/pil.py | 74b45e391dd24609f25b3067ab5ceab52c495cf8 | [] | no_license | nahyunkwon/Processing-3DImages | 792deafbd1a607af8cae439b5d7ab81f772f6653 | bde217aad08dd911ae8125edeae42f7b674614f2 | refs/heads/master | 2023-01-02T10:29:41.325974 | 2020-11-01T19:02:19 | 2020-11-01T19:02:19 | 299,133,212 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,327 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from __future__ import absolute_import
import os
from tempfile import mkstemp
from subprocess import Popen, PIPE
from io import BytesIO
from PIL import Image, ImageFile, ImageDraw, ImageSequence, JpegImagePlugin, ImageFilter
from thumbor.engines import BaseEngine
from thumbor.engines.extensions.pil import GifWriter
from thumbor.utils import logger, deprecated
# Optional compiled filters extension; Engine.paste(merge=True) requires it
# and raises at call time when it is unavailable.
try:
    from thumbor.ext.filters import _composite
    FILTERS_AVAILABLE = True
except ImportError:
    FILTERS_AVAILABLE = False

# Extension -> PIL format name used when encoding in Engine.read().
FORMATS = {
    '.tif': 'PNG',  # serve tif as png
    '.jpg': 'JPEG',
    '.jpeg': 'JPEG',
    '.gif': 'GIF',
    '.png': 'PNG',
    '.webp': 'WEBP'
}

# Enlarge PIL's save buffer and tolerate truncated input images.
ImageFile.MAXBLOCK = 2 ** 25
ImageFile.LOAD_TRUNCATED_IMAGES = True

# Exceptions raised by Pillow's decompression-bomb guard;
# DecompressionBombError only exists on newer Pillow versions.
DecompressionBombExceptions = (Image.DecompressionBombWarning,)

if hasattr(Image, 'DecompressionBombError'):
    DecompressionBombExceptions += (Image.DecompressionBombError,)
class Engine(BaseEngine):
    """Thumbor imaging engine implemented on top of PIL/Pillow."""

    def __init__(self, context):
        # Per-image JPEG metadata captured in create_image() and reused on
        # save so re-encoded JPEGs can keep their original quantization setup.
        super(Engine, self).__init__(context)
        self.subsampling = None
        self.qtables = None
        self.original_mode = None

        try:
            # MAX_PIXELS=None disables the cap; any int-able value is handed
            # to Pillow's decompression-bomb guard.
            if self.context.config.MAX_PIXELS is None or int(self.context.config.MAX_PIXELS):
                Image.MAX_IMAGE_PIXELS = self.context.config.MAX_PIXELS
        except (AttributeError, TypeError, ValueError):  # invalid type
            logger.info('MAX_PIXELS config variable set to invalid type. Has to be int on None')

    def gen_image(self, size, color):
        """Create a new RGBA image of *size* filled with *color* ('transparent' -> no fill)."""
        if color == 'transparent':
            color = None
        img = Image.new("RGBA", size, color)
        return img

    def create_image(self, buffer):
        """Load *buffer* into a PIL image, capturing ICC/EXIF/JPEG metadata.

        Returns a list of 'P'-mode frames for animated GIFs (when enabled),
        a single image otherwise, or None when Pillow rejects the input as
        a decompression bomb.
        """
        try:
            img = Image.open(BytesIO(buffer))
        except DecompressionBombExceptions as e:
            logger.warning("[PILEngine] create_image failed: {0}".format(e))
            return None
        self.icc_profile = img.info.get('icc_profile')
        self.exif = img.info.get('exif')
        self.original_mode = img.mode

        # JPEG chroma subsampling / quantization tables, reused on save.
        self.subsampling = JpegImagePlugin.get_sampling(img)
        if (self.subsampling == -1):  # n/a for this file
            self.subsampling = None
        self.qtables = getattr(img, 'quantization', None)

        if self.context.config.ALLOW_ANIMATED_GIFS and self.extension == '.gif':
            frames = []
            for frame in ImageSequence.Iterator(img):
                frames.append(frame.convert('P'))
            img.seek(0)
            self.frame_count = len(frames)
            return frames

        return img

    def get_resize_filter(self):
        """Map the PILLOW_RESAMPLING_FILTER config name to a PIL filter (default LANCZOS)."""
        config = self.context.config
        resample = config.PILLOW_RESAMPLING_FILTER if config.PILLOW_RESAMPLING_FILTER is not None else 'LANCZOS'

        available = {
            'LANCZOS': Image.LANCZOS,
            'NEAREST': Image.NEAREST,
            'BILINEAR': Image.BILINEAR,
            'BICUBIC': Image.BICUBIC,
        }
        # HAMMING is only present on newer Pillow versions.
        if hasattr(Image, 'HAMMING'):
            available['HAMMING'] = Image.HAMMING

        return available.get(resample.upper(), Image.LANCZOS)

    def draw_rectangle(self, x, y, width, height):
        """Outline a rectangle on the image (debug helper)."""
        # Nasty retry if the image is loaded for the first time and it's truncated
        try:
            d = ImageDraw.Draw(self.image)
        except IOError:
            d = ImageDraw.Draw(self.image)
        d.rectangle([x, y, x + width, y + height])

        del d

    def resize(self, width, height):
        """Resize in place to (width, height) using the configured filter."""
        # Indexed color modes (such as 1 and P) will be forced to use a
        # nearest neighbor resampling algorithm. So we convert them to
        # RGB(A) mode before resizing to avoid nasty scaling artifacts.
        if self.image.mode in ['1', 'P']:
            logger.debug('converting image from 8-bit/1-bit palette to 32-bit RGB(A) for resize')
            if self.image.mode == '1':
                target_mode = 'RGB'
            else:
                # convert() figures out RGB or RGBA based on palette used
                target_mode = None
            self.image = self.image.convert(mode=target_mode)

        size = (int(width), int(height))

        # Tell image loader what target size we want (only JPG for a moment)
        self.image.draft(None, size)

        resample = self.get_resize_filter()
        self.image = self.image.resize(size, resample)

    def crop(self, left, top, right, bottom):
        """Crop in place to the given pixel box."""
        self.image = self.image.crop((
            int(left),
            int(top),
            int(right),
            int(bottom)
        ))

    def rotate(self, degrees):
        """Rotate counter-clockwise; right angles use a lossless transpose."""
        # PIL rotates counter clockwise
        if degrees == 90:
            self.image = self.image.transpose(Image.ROTATE_90)
        elif degrees == 180:
            self.image = self.image.transpose(Image.ROTATE_180)
        elif degrees == 270:
            self.image = self.image.transpose(Image.ROTATE_270)
        else:
            self.image = self.image.rotate(degrees, expand=1)

    def flip_vertically(self):
        """Mirror the image top-to-bottom."""
        self.image = self.image.transpose(Image.FLIP_TOP_BOTTOM)

    def flip_horizontally(self):
        """Mirror the image left-to-right."""
        self.image = self.image.transpose(Image.FLIP_LEFT_RIGHT)

    def get_default_extension(self):
        """Pick '.png' for alpha/palette modes, '.jpeg' otherwise."""
        # extension is not present => force JPEG or PNG
        if self.image.mode in ['P', 'RGBA', 'LA']:
            return '.png'
        else:
            return '.jpeg'

    def read(self, extension=None, quality=None):  # NOQA
        """Encode the current image and return the resulting bytes.

        Re-applies captured JPEG metadata (subsampling/qtables), honours
        quality/compression config, and may restore the original indexed
        mode for PNG/GIF output.  Falls back to a plain save on IOError.
        """
        # returns image buffer in byte format.
        img_buffer = BytesIO()
        requested_extension = extension or self.extension

        # 1 and P mode images will be much smaller if converted back to
        # their original mode. So let's do that after resizing. Get $$.
        if self.context.config.PILLOW_PRESERVE_INDEXED_MODE and requested_extension in [None, '.png', '.gif'] \
                and self.original_mode in ['P', '1'] and self.original_mode != self.image.mode:
            if self.original_mode == '1':
                self.image = self.image.convert('1')
            else:
                # libimagequant might not be enabled on compile time
                # but it's better than default octree for RGBA images, so worth a try
                quantize_default = True
                try:
                    # Option available since Pillow 3.3.0
                    if hasattr(Image, 'LIBIMAGEQUANT'):
                        self.image = self.image.quantize(method=Image.LIBIMAGEQUANT)
                        quantize_default = False
                except ValueError as ex:
                    if 'dependency' not in str(ex).lower():
                        raise
                if quantize_default:
                    self.image = self.image.quantize()

        ext = requested_extension or self.get_default_extension()

        options = {
            'quality': quality
        }
        if ext == '.jpg' or ext == '.jpeg':
            options['optimize'] = True
            if self.context.config.PROGRESSIVE_JPEG:
                # Can't simply set options['progressive'] to the value
                # of self.context.config.PROGRESSIVE_JPEG because save
                # operates on the presence of the key in **options, not
                # the value of that setting.
                options['progressive'] = True

            if self.image.mode != 'RGB':
                self.image = self.image.convert('RGB')
            else:
                subsampling_config = self.context.config.PILLOW_JPEG_SUBSAMPLING
                qtables_config = self.context.config.PILLOW_JPEG_QTABLES

                if subsampling_config is not None or qtables_config is not None:
                    options['quality'] = 0  # can't use 'keep' here as Pillow would try to extract qtables/subsampling and fail
                    orig_subsampling = self.subsampling
                    orig_qtables = self.qtables

                    # 'keep' (or unset) means reuse whatever the source image had.
                    if (subsampling_config == 'keep' or subsampling_config is None) and (orig_subsampling is not None):
                        options['subsampling'] = orig_subsampling
                    else:
                        options['subsampling'] = subsampling_config

                    if (qtables_config == 'keep' or qtables_config is None) and (orig_qtables and 2 <= len(orig_qtables) <= 4):
                        options['qtables'] = orig_qtables
                    else:
                        options['qtables'] = qtables_config

        if ext == '.png' and self.context.config.PNG_COMPRESSION_LEVEL is not None:
            options['compress_level'] = self.context.config.PNG_COMPRESSION_LEVEL

        if options['quality'] is None:
            options['quality'] = self.context.config.QUALITY

        if self.icc_profile is not None:
            options['icc_profile'] = self.icc_profile

        if self.context.config.PRESERVE_EXIF_INFO:
            if self.exif is not None:
                options['exif'] = self.exif

        try:
            if ext == '.webp':
                if options['quality'] == 100:
                    logger.debug("webp quality is 100, using lossless instead")
                    options['lossless'] = True
                    options.pop('quality')
                if self.image.mode not in ['RGB', 'RGBA']:
                    if self.image.mode == 'P':
                        mode = 'RGBA'
                    else:
                        mode = 'RGBA' if self.image.mode[-1] == 'A' else 'RGB'
                    self.image = self.image.convert(mode)

            if ext in ['.png', '.gif'] and self.image.mode == 'CMYK':
                self.image = self.image.convert('RGBA')

            self.image.format = FORMATS.get(ext, FORMATS[self.get_default_extension()])
            self.image.save(img_buffer, self.image.format, **options)
        except IOError:
            logger.exception('Could not save as improved image, consider to increase ImageFile.MAXBLOCK')
            self.image.save(img_buffer, FORMATS[ext])

        results = img_buffer.getvalue()
        img_buffer.close()

        self.extension = ext

        return results

    def read_multiple(self, images, extension=None):
        """Assemble *images* into an animated GIF, then palette-optimize via gifsicle.

        Falls back to the unoptimized GIF when gifsicle exits non-zero.
        NOTE(review): the temp file is written via ``os.fdopen(tmp_fd, "w")``
        (text mode) — Python 2 era code; confirm before running on Python 3.
        """
        gif_writer = GifWriter()
        img_buffer = BytesIO()

        duration = []
        converted_images = []
        xy = []
        dispose = []

        for im in images:
            # GIF durations are stored in ms; GifWriter expects seconds.
            duration.append(float(im.info.get('duration', 80)) / 1000)
            converted_images.append(im.convert("RGB"))
            xy.append((0, 0))
            dispose.append(1)

        loop = int(self.image.info.get('loop', 1))

        images = gif_writer.convertImagesToPIL(converted_images, False, None)
        gif_writer.writeGifToFile(img_buffer, images, duration, loop, xy, dispose)

        results = img_buffer.getvalue()
        img_buffer.close()

        tmp_fd, tmp_file_path = mkstemp()
        f = os.fdopen(tmp_fd, "w")
        f.write(results)
        f.close()

        command = [
            'gifsicle',
            '--colors',
            '256',
            tmp_file_path
        ]

        popen = Popen(command, stdout=PIPE)
        pipe = popen.stdout
        pipe_output = pipe.read()
        pipe.close()

        if popen.wait() == 0:
            results = pipe_output

        os.remove(tmp_file_path)

        return results

    @deprecated("Use image_data_as_rgb instead.")
    def get_image_data(self):
        """Return the raw pixel bytes in the image's current mode."""
        return self.image.tobytes()

    def set_image_data(self, data):
        """Replace the pixel data in place (buffer must match current mode/size)."""
        self.image.frombytes(data)

    @deprecated("Use image_data_as_rgb instead.")
    def get_image_mode(self):
        """Return the current PIL mode string (e.g. 'RGB', 'P')."""
        return self.image.mode

    def image_data_as_rgb(self, update_image=True):
        """Return (mode, raw bytes) with the image coerced to RGB or RGBA."""
        converted_image = self.image
        if converted_image.mode not in ['RGB', 'RGBA']:
            if 'A' in converted_image.mode:
                converted_image = converted_image.convert('RGBA')
            elif converted_image.mode == 'P':
                # convert() figures out RGB or RGBA based on palette used
                converted_image = converted_image.convert(None)
            else:
                converted_image = converted_image.convert('RGB')

        if update_image:
            self.image = converted_image

        return converted_image.mode, converted_image.tobytes()

    def convert_to_grayscale(self, update_image=True, with_alpha=True):
        """Convert to 'L' (or 'LA' when the image has alpha and *with_alpha*)."""
        if 'A' in self.image.mode and with_alpha:
            image = self.image.convert('LA')
        else:
            image = self.image.convert('L')
        if update_image:
            self.image = image
        return image

    def has_transparency(self):
        """Return True only when the image actually contains non-opaque pixels."""
        has_transparency = 'A' in self.image.mode or 'transparency' in self.image.info
        if has_transparency:
            # If the image has alpha channel,
            # we check for any pixels that are not opaque (255)
            has_transparency = min(self.image.convert('RGBA').getchannel('A').getextrema()) < 255
        return has_transparency

    def paste(self, other_engine, pos, merge=True):
        """Paste *other_engine*'s image at *pos*.

        merge=True alpha-composites through the compiled _composite filter
        extension (RuntimeError when it is not available); merge=False does
        a plain PIL paste.
        """
        if merge and not FILTERS_AVAILABLE:
            raise RuntimeError(
                'You need filters enabled to use paste with merge. Please reinstall ' +
                'thumbor with proper compilation of its filters.')

        self.enable_alpha()
        other_engine.enable_alpha()

        image = self.image
        other_image = other_engine.image

        if merge:
            sz = self.size
            other_size = other_engine.size
            mode, data = self.image_data_as_rgb()
            other_mode, other_data = other_engine.image_data_as_rgb()

            imgdata = _composite.apply(
                mode, data, sz[0], sz[1],
                other_data, other_size[0], other_size[1], pos[0], pos[1])

            self.set_image_data(imgdata)
        else:
            image.paste(other_image, pos)

    def enable_alpha(self):
        """Ensure the image carries an alpha channel (convert to RGBA)."""
        if self.image.mode != 'RGBA':
            self.image = self.image.convert('RGBA')

    def strip_icc(self):
        """Drop the captured ICC profile so it is not re-embedded on save."""
        self.icc_profile = None

    def strip_exif(self):
        """Drop the captured EXIF blob so it is not re-embedded on save."""
        self.exif = None

    def blur(self, radius):
        """Apply a Gaussian blur of the given radius in place."""
        self.image = self.image.filter(ImageFilter.GaussianBlur(radius))
| [
"[email protected]"
] | |
2542635ffe3127c2fbac935e327705fd7fcb674b | cc7bbdbb22cb6f7e7916388a5ee8218bc8ffa158 | /Python3/Tornado/apps/pg/PG_Client/clientadmin/utils.py | 971af7cb5a0d2998e2e927d20769b4a5cd027213 | [
"MIT"
] | permissive | youngqqcn/QBlockChainNotes | a816e067642f48a6da38b624663254b4016ec496 | c9c143eaba6c06e3cee866669ec286e4d3cdbba8 | refs/heads/master | 2023-04-03T23:31:05.585545 | 2023-03-30T09:29:07 | 2023-03-30T09:29:07 | 155,657,459 | 37 | 15 | MIT | 2023-03-06T23:09:32 | 2018-11-01T03:33:11 | JavaScript | UTF-8 | Python | false | false | 1,574 | py |
def jwt_response_payload_handler(token, user=None, request=None):
    """Build the response payload for a successful JWT authentication.

    :param token: the issued JWT string.
    :param user: the authenticated user object.  When ``user.username`` is
        set it carries a pending verification code (``gcode``) —
        presumably the Google two-factor flow; TODO confirm against the
        login view.
    :param request: the data submitted by the client (unused here).
    :returns: a dict payload — "fail" (HTTP-style status 200) asking for
        the verification code when ``user.username`` is present, otherwise
        "success" with the token and profile fields.
    """
    # Verification step still pending: ask the client for the code.
    if user.username is not None:  # fix: identity check instead of != None
        return {
            'code': "fail",
            "status": 200,
            "data": {
                "gcode" : user.username,
                "detail": "请输入验证码,重新登录!",
            }
        }
    return {
        'code': "success",
        "status": 200,
        "data": {
            'token': token,
            'pro_id': user.pro_id,
            'username': user.pro_name,
            'email': user.email,
            'tel_no': user.tel_no,
            "detail": "登录成功!",
        }
    }
def jwt_response_payload_error_handler(request=None):
    """Payload returned when the submitted account credentials are invalid."""
    detail = "登录失败! 请检查账号信息是否正确,重新登录! "
    return {"code": "fail", "status": 400, "data": {"detail": detail}}
def jwt_response_payload_code_error_handler(request=None):
    """Payload returned when the Google verification code is wrong."""
    detail = "登录失败! 请检查谷歌验证码是否正确,重新登录! "
    return {"code": "fail", "status": 400, "data": {"detail": detail}}
def jwt_response_payload_frequently_error_handler(request=None):
    """Payload returned when login attempts are rate-limited."""
    detail = "登录失败! 登录频繁! "
    return {"code": "fail", "status": 400, "data": {"detail": detail}}
| [
"[email protected]"
] | |
964d0631249c05ccd8a57becf125da4429dca45e | 3dfbd430ef10352acd4a4cfbeb51c01e78ad0dd1 | /updatesearch/pipeline_xml.py | 20b05fae0c8e62b7e8d8a4d81c2730aedf8e3432 | [
"BSD-2-Clause"
] | permissive | DalavanCloud/search-journals-proc | f09c7e29ede35e6756ccee5f20fabec9c1676224 | a246688ffd213c6ff814c290ea2190f7de358def | refs/heads/master | 2020-04-29T06:44:04.593174 | 2017-11-09T19:30:37 | 2017-11-09T19:30:37 | 175,927,206 | 1 | 0 | null | 2019-03-16T04:44:47 | 2019-03-16T04:44:47 | null | UTF-8 | Python | false | false | 19,924 | py | # coding: utf-8
from lxml import etree as ET
import plumber
from citedby import client
# Module-level citedby Thrift client, used by the ReceivedCitations pipe.
# NOTE(review): the endpoint is hard-coded; presumably it should come from
# configuration — confirm.
CITEDBY = client.ThriftClient(domain='citedby.scielo.org:11610')
"""
Full example output of this pipeline:
<doc>
<field name="id">art-S0102-695X2015000100053-scl</field>
<field name="journal_title">Revista Ambiente & Água</field>
<field name="in">scl</field>
<field name="ac">Agricultural Sciences</field>
<field name="type">editorial</field>
<field name="ur">art-S1980-993X2015000200234</field>
<field name="authors">Marcelo dos Santos, Targa</field>
<field name="orcidid">orcidid</field>
<field name="lattesid">lattesid</field>
<field name="ti_*">Benefits and legacy of the water crisis in Brazil</field>
<field name="pg">234-239</field>
<field name="doi">10.1590/S0102-67202014000200011</field>
<field name="wok_citation_index">SCIE</field>
<field name="volume">48</field>
<field name="supplement_volume">48</field>
<field name="issue">7</field>
<field name="supplement_issue">suppl. 2</field>
<field name="start_page">216</field>
<field name="end_page">218</field>
<field name="ta">Rev. Ambient. Água</field>
<field name="la">en</field>
<field name="fulltext_pdf_pt">http://www.scielo.br/pdf/ambiagua/v10n2/1980-993X-ambiagua-10-02-00234.pdf</field>
<field name="fulltext_pdf_pt">http://www.scielo.br/scielo.php?script=sci_abstract&pid=S0102-67202014000200138&lng=en&nrm=iso&tlng=pt</field>
<field name="da">2015-06</field>
<field name="ab_*">In this editorial, we reflect on the benefits and legacy of the water crisis....</field>
<field name="aff_country">Brasil</field>
<field name="aff_institution">usp</field>
<field name="sponsor">CNPQ</field>
</doc>
"""
# Document types that count as citable documents; used by the IsCitable pipe.
CITABLE_DOCUMENT_TYPES = (
    u'article-commentary',
    u'brief-report',
    u'case-report',
    u'rapid-communication',
    u'research-article',
    u'review-article'
)
class SetupDocument(plumber.Pipe):
    """First pipe: attach an empty Solr ``<doc>`` root to the article."""

    def transform(self, data):
        return data, ET.Element('doc')
class SubjectAreas(plumber.Pipe):
    """Index journal subject areas; 3+ areas collapse to ``multidisciplinary``."""

    def precond(data):
        article, _ = data
        if not article.journal.subject_areas:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        areas = article.journal.subject_areas

        if len(areas) > 2:
            elem = ET.Element('field', name='subject_area')
            elem.text = 'multidisciplinary'
            root.find('.').append(elem)
            return data

        for area in areas:
            elem = ET.Element('field', name='subject_area')
            elem.text = area
            root.find('.').append(elem)
        return data
class Keywords(plumber.Pipe):
    """Index every keyword, one ``keyword_<lang>`` field per entry."""

    def precond(data):
        article, _ = data
        if not article.keywords():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        for lang, words in article.keywords().items():
            for word in words:
                elem = ET.Element('field', name='keyword_%s' % lang)
                elem.text = word
                root.find('.').append(elem)
        return data
class IsCitable(plumber.Pipe):
    """Flag whether the document type counts as citable (``is_citable``)."""

    def transform(self, data):
        article, root = data
        citable = article.document_type in CITABLE_DOCUMENT_TYPES
        elem = ET.Element('field', name='is_citable')
        elem.text = 'is_true' if citable else 'is_false'
        root.find('.').append(elem)
        return data
class JournalISSNs(plumber.Pipe):
    """Index the journal ISSNs (electronic, print and SciELO) as ``issn``."""

    def transform(self, data):
        raw, xml = data

        issns = set()
        # NOTE(review): the guards test article-level ``raw.electronic_issn``
        # / ``raw.print_issn`` but the values added come from ``raw.journal``
        # — looks inconsistent; confirm against the xylose Article API.
        if raw.electronic_issn:
            issns.add(raw.journal.electronic_issn)

        if raw.print_issn:
            issns.add(raw.journal.print_issn)

        issns.add(raw.journal.scielo_issn)

        for issn in issns:
            field = ET.Element('field')
            field.text = issn
            field.set('name', 'issn')
            xml.find('.').append(field)

        return data
class DocumentID(plumber.Pipe):
    """Index the unique Solr id: ``<publisher_id>-<collection_acronym>``."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='id')
        elem.text = '{0}-{1}'.format(article.publisher_id, article.collection_acronym)
        root.find('.').append(elem)
        return data
class JournalTitle(plumber.Pipe):
    """Index the journal title as the Solr ``journal_title`` field."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='journal_title')
        elem.text = article.journal.title
        root.find('.').append(elem)
        return data
# NOTE(review): this class is an exact duplicate of the ``JournalTitle``
# defined immediately above — the second definition silently shadows the
# first.  Presumably one of the two was meant to be removed or renamed;
# confirm before deleting.
class JournalTitle(plumber.Pipe):
    """Index the journal title as the ``journal_title`` field."""

    def transform(self, data):
        raw, xml = data

        field = ET.Element('field')
        field.text = raw.journal.title
        field.set('name', 'journal_title')
        xml.find('.').append(field)

        return data
class Permission(plumber.Pipe):
    """Index the use-license id, text and URL when permissions exist."""

    def precond(data):
        article, _ = data
        if not article.permissions:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        permissions = article.permissions

        elem = ET.Element('field', name='use_license')
        elem.text = permissions.get('id', '')
        root.append(elem)

        if permissions.get('text', None):
            elem = ET.Element('field', name='use_license_text')
            elem.text = permissions.get('text', '')
            root.append(elem)

        if permissions.get('url', None):
            elem = ET.Element('field', name='use_license_uri')
            elem.text = permissions.get('url', '')
            root.append(elem)

        return data
class Collection(plumber.Pipe):
    """Index the collection acronym as the Solr ``in`` field."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='in')
        elem.text = article.collection_acronym
        root.find('.').append(elem)
        return data
class DocumentType(plumber.Pipe):
    """Index the document type as the Solr ``type`` field."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='type')
        elem.text = article.document_type
        root.find('.').append(elem)
        return data
class URL(plumber.Pipe):
    """Index the document's publisher id as the ``ur`` (URL) field."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='ur')
        elem.text = '{0}'.format(article.publisher_id)
        root.find('.').append(elem)
        return data
class Authors(plumber.Pipe):
    """Index each author as ``au`` in "surname, given names" form."""

    def precond(data):
        article, _ = data
        if not article.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        for author in article.authors:
            parts = [author[key] for key in ('surname', 'given_names') if key in author]
            elem = ET.Element('field', name='au')
            elem.text = ', '.join(parts)
            root.find('.').append(elem)
        return data
class Orcid(plumber.Pipe):
    """Index each author's ORCID id (when present) as ``orcid``."""

    def precond(data):
        article, _ = data
        if not article.authors:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        for author in article.authors:
            orcid = author.get('orcid', None)
            if not orcid:
                continue
            elem = ET.Element('field', name='orcid')
            elem.text = orcid
            root.find('.').append(elem)
        return data
class OriginalTitle(plumber.Pipe):
    """Index the original-language title as ``ti``; skipped when absent."""

    def precond(data):
        article, _ = data
        if not article.original_title():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='ti')
        elem.text = article.original_title()
        root.find('.').append(elem)
        return data
class Titles(plumber.Pipe):
    """Index the original title and every translation as ``ti_<lang>``."""

    def precond(data):
        article, _ = data
        if not article.original_title() and not article.translated_titles():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data

        elem = ET.Element('field', name='ti_%s' % article.original_language())
        elem.text = article.original_title()
        root.find('.').append(elem)

        translations = article.translated_titles()
        if not translations:
            return data

        for lang, title in translations.items():
            elem = ET.Element('field', name='ti_%s' % lang)
            elem.text = title
            root.find('.').append(elem)

        return data
class Pages(plumber.Pipe):
    """Index the "start-end" page range as ``pg``; skipped when no pages."""

    def precond(data):
        article, _ = data
        if not article.start_page and not article.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        span = [page for page in (article.start_page, article.end_page) if page]
        elem = ET.Element('field', name='pg')
        elem.text = '-'.join(span)
        root.find('.').append(elem)
        return data
class DOI(plumber.Pipe):
    """Index the document DOI as ``doi``; skipped when absent."""

    def precond(data):
        article, _ = data
        if not article.doi:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='doi')
        elem.text = article.doi
        root.find('.').append(elem)
        return data
class WOKCI(plumber.Pipe):
    """Index Web of Science citation indexes as ``wok_citation_index``."""

    def precond(data):
        article, _ = data
        if not article.journal.wos_citation_indexes:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        for index in article.journal.wos_citation_indexes:
            elem = ET.Element('field', name='wok_citation_index')
            elem.text = index.replace('&', '')
            root.find('.').append(elem)
        return data
class WOKSC(plumber.Pipe):
    """Index Web of Science subject areas as ``wok_subject_categories``."""

    def precond(data):
        article, _ = data
        if not article.journal.wos_subject_areas:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        for area in article.journal.wos_subject_areas:
            elem = ET.Element('field', name='wok_subject_categories')
            elem.text = area
            root.find('.').append(elem)
        return data
class Volume(plumber.Pipe):
    """Index the issue volume (when present) as ``volume``."""

    def transform(self, data):
        article, root = data
        value = article.issue.volume
        if value:
            elem = ET.Element('field', name='volume')
            elem.text = value
            root.find('.').append(elem)
        return data
class SupplementVolume(plumber.Pipe):
    """Index the supplement volume (when present) as ``supplement_volume``."""

    def transform(self, data):
        article, root = data
        value = article.issue.supplement_volume
        if value:
            elem = ET.Element('field', name='supplement_volume')
            elem.text = value
            root.find('.').append(elem)
        return data
class Issue(plumber.Pipe):
    """Index the issue number (when present) as ``issue``."""

    def transform(self, data):
        article, root = data
        value = article.issue.number
        if value:
            elem = ET.Element('field', name='issue')
            elem.text = value
            root.find('.').append(elem)
        return data
class SupplementIssue(plumber.Pipe):
    """Index the supplement number (when present) as ``supplement_issue``."""

    def transform(self, data):
        article, root = data
        value = article.issue.supplement_number
        if value:
            elem = ET.Element('field', name='supplement_issue')
            elem.text = value
            root.find('.').append(elem)
        return data
class ElocationPage(plumber.Pipe):
    """Index the e-location id as ``elocation``; skipped when absent."""

    def precond(data):
        article, _ = data
        if not article.elocation:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='elocation')
        elem.text = article.elocation
        root.find('.').append(elem)
        return data
class StartPage(plumber.Pipe):
    """Index the start page as ``start_page``; skipped when absent."""

    def precond(data):
        article, _ = data
        if not article.start_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='start_page')
        elem.text = article.start_page
        root.find('.').append(elem)
        return data
class EndPage(plumber.Pipe):
    """Index the end page as ``end_page``; skipped when absent."""

    def precond(data):
        article, _ = data
        if not article.end_page:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='end_page')
        elem.text = article.end_page
        root.find('.').append(elem)
        return data
class JournalAbbrevTitle(plumber.Pipe):
    """Index the abbreviated journal title as ``ta``."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='ta')
        elem.text = article.journal.abbreviated_title
        root.find('.').append(elem)
        return data
class Languages(plumber.Pipe):
    """Index every full-text language plus the original language as ``la``."""

    def transform(self, data):
        article, root = data
        langs = set(article.languages())
        langs.add(article.original_language())
        for lang in langs:
            elem = ET.Element('field', name='la')
            elem.text = lang
            root.find('.').append(elem)
        return data
class AvailableLanguages(plumber.Pipe):
    """Index every language with any content (texts plus abstracts)."""

    def transform(self, data):
        article, root = data

        available = set(article.languages())
        available.add(article.original_language())

        abstracts = article.translated_abstracts()
        if abstracts:
            available.update(abstracts.keys())

        for lang in available:
            elem = ET.Element('field', name='available_languages')
            elem.text = lang
            root.find('.').append(elem)
        return data
class Fulltexts(plumber.Pipe):
    """Index full-text URLs as ``fulltext_pdf_<lang>`` / ``fulltext_html_<lang>``.

    Skipped entirely (via the precondition) when the article exposes no
    full texts, which also protects ``transform`` from a falsy return of
    ``raw.fulltexts()``.
    """

    def precond(data):

        raw, xml = data

        if not raw.fulltexts():
            raise plumber.UnmetPrecondition()

    # BUG FIX: ``precond`` was defined but the decorator was never applied
    # (unlike every sibling pipe), so transform() could run against an
    # empty/None fulltexts() result.
    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data

        ft = raw.fulltexts()

        # There is articles that does not have pdf
        if 'pdf' in ft:
            for language, url in ft['pdf'].items():
                field = ET.Element('field')
                field.text = url
                field.set('name', 'fulltext_pdf_%s' % language)
                xml.find('.').append(field)

        if 'html' in ft:
            for language, url in ft['html'].items():
                field = ET.Element('field')
                field.text = url
                field.set('name', 'fulltext_html_%s' % language)
                xml.find('.').append(field)

        return data
class PublicationDate(plumber.Pipe):
    """Index the document publication date as ``da``."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='da')
        elem.text = article.publication_date
        root.find('.').append(elem)
        return data
class SciELOPublicationDate(plumber.Pipe):
    """Index the SciELO record creation date as ``scielo_publication_date``."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='scielo_publication_date')
        elem.text = article.creation_date
        root.find('.').append(elem)
        return data
class ReceivedCitations(plumber.Pipe):
    """Query the citedby service and index the received-citation count."""

    def transform(self, data):
        article, root = data
        result = CITEDBY.citedby_pid(article.publisher_id, metaonly=True)
        total = result.get('article', {'total_received': 0})['total_received']
        elem = ET.Element('field', name='total_received')
        elem.text = str(total)
        root.find('.').append(elem)
        return data
class SciELOProcessingDate(plumber.Pipe):
    """Index the SciELO processing date as ``scielo_processing_date``."""

    def transform(self, data):
        article, root = data
        elem = ET.Element('field', name='scielo_processing_date')
        elem.text = article.processing_date
        root.find('.').append(elem)
        return data
class Abstract(plumber.Pipe):
    """Index the original abstract and every translation as ``ab_<lang>``."""

    def precond(data):
        article, _ = data
        if not article.original_abstract() and not article.translated_abstracts():
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data

        original = article.original_abstract()
        if original:
            elem = ET.Element('field', name='ab_%s' % article.original_language())
            elem.text = original
            root.find('.').append(elem)

        translations = article.translated_abstracts()
        if not translations:
            return data

        for lang, abstract in translations.items():
            elem = ET.Element('field', name='ab_%s' % lang)
            elem.text = abstract
            root.find('.').append(elem)

        return data
class AffiliationCountry(plumber.Pipe):
    """Index each distinct affiliation country as ``aff_country``."""

    def precond(data):
        article, _ = data
        if not article.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        article, root = data
        countries = {aff['country'] for aff in article.mixed_affiliations
                     if 'country' in aff}
        for country in countries:
            elem = ET.Element('field', name='aff_country')
            elem.text = country.strip()
            root.find('.').append(elem)
        return data
class AffiliationInstitution(plumber.Pipe):
    """Append one ``aff_institution`` field per distinct institution."""

    def precond(data):
        raw, xml = data

        if not raw.mixed_affiliations:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data

        # Deduplicate before emitting fields.
        institutions = {aff['institution'] for aff in raw.mixed_affiliations
                        if 'institution' in aff}

        for institution in institutions:
            elem = ET.Element('field')
            elem.set('name', 'aff_institution')
            elem.text = institution.strip()
            xml.find('.').append(elem)

        return data
class Sponsor(plumber.Pipe):
    """Append one ``sponsor`` field per distinct project sponsor orgname."""

    def precond(data):
        raw, xml = data

        if not raw.project_sponsor:
            raise plumber.UnmetPrecondition()

    @plumber.precondition(precond)
    def transform(self, data):
        raw, xml = data

        # Deduplicate before emitting fields.
        orgnames = {item['orgname'] for item in raw.project_sponsor
                    if 'orgname' in item}

        for orgname in orgnames:
            elem = ET.Element('field')
            elem.set('name', 'sponsor')
            elem.text = orgname
            xml.find('.').append(elem)

        return data
class TearDown(plumber.Pipe):
    """Final pipe: drop the raw document and hand back only the XML tree."""

    def transform(self, data):
        _raw, xml = data
        return xml
| [
"[email protected]"
] | |
616481b2e75063bd42b700b4baac1bdbbd6f92b1 | 1804187f39dd6004250933b35ba9ce24297f32a5 | /car_importclass.py | 860b39b3d9c08872ea8be65c07d26f6029ef9c66 | [] | no_license | xiaomengxiangjia/Python | ecd2e3e8576364f15482669cb75b52b8790543f5 | 7f52a33d7956068d26347cf34d35c953b945a635 | refs/heads/master | 2020-03-20T23:01:09.981928 | 2018-08-23T09:04:53 | 2018-08-27T05:46:38 | 137,825,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,242 | py | """一个可用于表示汽车的类"""
class Car():
    """A simple model of a car."""

    def __init__(self, make, model, year):
        """Store make, model and model year; the odometer starts at zero."""
        self.make = make
        self.model = model
        self.year = year
        self.odometer_reading = 0

    def get_descriptive_name(self):
        """Return a neatly formatted descriptive name, e.g. '2016 Audi A4'."""
        parts = (str(self.year), self.make, self.model)
        return ' '.join(parts).title()

    def read_odometer(self):
        """Print a message stating the car's mileage."""
        print("This car has " + str(self.odometer_reading) + " miles on it.")

    def update_odometer(self, mileage):
        """Set the odometer to ``mileage``, refusing to roll it back."""
        if mileage < self.odometer_reading:
            print("You can't roll back an odometer!")
        else:
            self.odometer_reading = mileage

    def increment_odometer(self, miles):
        """Add ``miles`` to the odometer reading."""
        self.odometer_reading = self.odometer_reading + miles
"""一组用于表示电动汽车的类"""
class Battery():
    """A simple model of an electric car battery."""

    def __init__(self, battery_size=60):
        """Store the battery capacity in kWh (default 60)."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print a message describing the battery capacity."""
        print("This car has a " + str(self.battery_size) + "-kwh battery.")

    def get_range(self):
        """Print the approximate range this battery provides."""
        # Known capacities map to known ranges; anything else gets 200.
        if self.battery_size == 70:
            miles = 240
        elif self.battery_size == 85:
            miles = 270
        else:
            miles = 200
        message = ("This car can go approximately " + str(miles)
                   + " miles on a full charge.")
        print(message)
class ElectricCar(Car):
    """A Car specialised for electric drive: carries a Battery."""

    def __init__(self, make, model, year):
        """Initialise the Car attributes, then attach a default Battery."""
        super().__init__(make, model, year)
        self.battery = Battery()
| [
"[email protected]"
] | |
276bccd4f16fb7b435ac61d0da296658d2a152fd | 97ae427ff84c9b0450ed709dc55e1cc0e1edc096 | /til/future_til/class_level_operators.py | 02723ea43b703bfd62523ad8737ad110b21d2a4e | [] | no_license | OaklandPeters/til | 9081ac8b968223f4c92b38cf20cda90c92966628 | 12a1f7623916709211686d7817b93c7ef4d532d2 | refs/heads/master | 2021-01-17T14:16:48.285244 | 2016-06-20T14:23:40 | 2016-06-20T14:23:40 | 51,449,450 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | """
This has not been made into a TIL in til/python yet, because...
it does not work correctly atm.
However, I'm reasonably sure I can get it to work (since I've got type-level operatores to work in the past)
"""
#
#
# Class-level operators
#--------------------------
# ``x * y`` looks up ``__mul__`` on type(x).  For an *instance* that type is
# the class; for a *class object* it is the metaclass.  So supporting both
# ``Cls * n`` and ``Cls(b) * n`` needs a metaclass ``__mul__`` that forwards
# to the ``__mul__`` defined in the class body, binding the class itself in
# the ``self`` slot.  Instance multiplication never touches the metaclass.
#
class OperatorMeta(type):
    """Metaclass forwarding ``Cls * other`` to the class-body ``__mul__``.

    Instances are unaffected: ``obj * other`` resolves ``__mul__`` on the
    class directly and binds ``self`` to the instance as usual.
    """
    def __mul__(cls, other):
        # cls.__dict__ yields the plain function (no descriptor binding),
        # which lets us pass the class itself in the ``self`` slot so that
        # class attributes (e.g. ``base``) are used.
        mul = cls.__dict__.get('__mul__')
        if mul is not None:
            return mul(cls, other)
        raise TypeError(str.format(
            "unsupported operand type(s) for *: '{0}' and '{1}'",
            cls.__name__, type(other).__name__
        ))


class ThreeBase(metaclass=OperatorMeta):
    """Multiplies via ``base``: 3 at class level, per-instance after init."""
    base = 3

    def __mul__(self, value):
        # ``self`` is an instance for ``obj * n``, and the class object when
        # forwarded from OperatorMeta for ``ThreeBase * n``.
        return self.base * value

    def __init__(self, base):
        self.base = base


assert (ThreeBase * 5) == 15      # class level: class attribute base == 3
assert (ThreeBase(10) * 5) == 50  # instance level: instance base == 10
| [
"[email protected]"
] | |
96164464f24ee51181a36ffef0bb4a7a56bde3c5 | 7f227c62d25d09496dc5aabd9d58fc971c7c820d | /main.py | 4601d7ce2812480fd23ff394cffb3d3cee64b020 | [] | no_license | 0x15F9/ColabDemo | 6cd821ad6295cb30a656d506372882f14428adab | 093b374b4493146c787b48f46a6633bb1351fe20 | refs/heads/master | 2023-03-24T01:59:34.293793 | 2021-03-22T07:37:24 | 2021-03-22T07:37:24 | 350,248,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38 | py | for i in range(10):
print("Hello") | [
"[email protected]"
] | |
b0e3a882a9cb2bf2f6a1e29d61545ed83bc64a05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02882/s441562654.py | 2251e4e406faa3b13c7d32923f7711a41c800a0e | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | a, b, x = map(int, input().split())
if x > (a**2)*b/2:
t = 2*((a**2)*b-x)/(a**3)
else:
t = a*(b**2)/(2*x)
import math
ans = math.degrees(math.atan(t))
print(ans)
| [
"[email protected]"
] | |
7eae7b743e1fdb51757eab7546ee206614610ba1 | 43b4cabe8b711d9eb6988a17d0914cf95ac1c5a1 | /Lesson-2/7_BookmarkServer/BookmarkServer.py | b42fd8ceecfd9c42216f26e2953ac331e00dca63 | [] | no_license | fatih-iver/course-ud303 | c9aae321336c8e0b3ed1e671338cc993d04dc34b | 64d2107891cc24d303dffb98216a72505eeeb217 | refs/heads/master | 2020-03-24T21:57:30.923020 | 2018-08-04T11:50:20 | 2018-08-04T11:50:20 | 143,059,407 | 0 | 0 | null | 2018-07-31T19:40:50 | 2018-07-31T19:40:49 | null | UTF-8 | Python | false | false | 5,827 | py | #!/usr/bin/env python3
#
# A *bookmark server* or URI shortener that maintains a mapping (dictionary)
# between short names and long URIs, checking that each new URI added to the
# mapping actually works (i.e. returns a 200 OK).
#
# This server is intended to serve three kinds of requests:
#
# * A GET request to the / (root) path. The server returns a form allowing
# the user to submit a new name/URI pairing. The form also includes a
# listing of all the known pairings.
# * A POST request containing "longuri" and "shortname" fields. The server
# checks that the URI is valid (by requesting it), and if so, stores the
# mapping from shortname to longuri in its dictionary. The server then
# redirects back to the root path.
# * A GET request whose path contains a short name. The server looks up
# that short name in its dictionary and redirects to the corresponding
# long URI.
#
# Your job in this exercise is to finish the server code.
#
# Here are the steps you need to complete:
#
# 1. Write the CheckURI function, which takes a URI and returns True if a
# request to that URI returns a 200 OK, and False otherwise.
#
# 2. Write the code inside do_GET that sends a 303 redirect to a known name.
#
# 3. Write the code inside do_POST that sends a 400 error if the form fields
# are missing.
#
# 4. Write the code inside do_POST that sends a 303 redirect to the form
# after saving a newly submitted URI.
#
# 5. Write the code inside do_POST that sends a 404 error if a URI is not
# successfully checked (i.e. if CheckURI returns false).
#
# In each step, you'll need to delete a line of code that raises the
# NotImplementedError exception. These are there as placeholders in the
# starter code.
#
# After writing each step, restart the server and run test.py to test it.
import http.server
import os
from urllib.parse import unquote, parse_qs

import requests
memory = {}
form = '''<!DOCTYPE html>
<title>Bookmark Server</title>
<form method="POST">
<label>Long URI:
<input name="longuri">
</label>
<br>
<label>Short name:
<input name="shortname">
</label>
<br>
<button type="submit">Save it!</button>
</form>
<p>URIs I know about:
<pre>
{}
</pre>
'''
def CheckURI(uri, timeout=5):
    '''Return True if a GET request to `uri` answers 200 OK.

    Any other status code, a timeout, or a connection problem counts as
    unreachable and yields False.
    '''
    try:
        response = requests.get(uri, timeout=timeout)
    except requests.RequestException:
        # Timeout, DNS failure, refused connection, ... -> not reachable.
        return False
    return response.status_code == 200
class Shortener(http.server.BaseHTTPRequestHandler):
    """Request handler for the bookmark server.

    GET /          -> serve the submission form plus the known mappings.
    GET /<name>    -> 303 redirect to the stored long URI, or 404.
    POST           -> validate the submitted URI, store it, redirect to /.
    """

    def do_GET(self):
        # Path is "/" or "/<shortname>"; drop the slash and URL-decode.
        name = unquote(self.path[1:])

        if not name:
            # Root path: render the form with the known mappings listed.
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            known = "\n".join("{} : {}".format(key, memory[key])
                              for key in sorted(memory.keys()))
            self.wfile.write(form.format(known).encode())
            return

        if name in memory:
            # Known short name: redirect to the stored long URI.
            self.send_response(303)
            self.send_header('Location', memory[name])
            self.end_headers()
        else:
            # Unknown short name.
            self.send_response(404)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write("I don't know '{}'.".format(name).encode())

    def do_POST(self):
        # Decode the url-encoded form body.
        length = int(self.headers.get('Content-length', 0))
        params = parse_qs(self.rfile.read(length).decode())

        if "longuri" not in params or "shortname" not in params:
            self.send_response(400)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write("Missing form fields!".encode())
            return

        longuri = params["longuri"][0]
        shortname = params["shortname"][0]

        if not CheckURI(longuri):
            # The long URI did not answer 200 OK.
            self.send_response(404)
            self.send_header('Content-type', 'text/plain; charset=utf-8')
            self.end_headers()
            self.wfile.write("Couldn't fetch URI '{}'. Sorry!".format(longuri).encode())
            return

        # The URI works: remember it and send the client back to the form.
        memory[shortname] = longuri
        self.send_response(303)
        self.send_header('Location', '/')
        self.end_headers()
if __name__ == '__main__':
    # NOTE(review): ``os`` is used here but is not imported at the top of
    # this file -- ensure ``import os`` is present or startup crashes with
    # a NameError before the server ever binds.
    port = int(os.environ.get('PORT', 8000)) # Use PORT if it's there.
    server_address = ('', port)
    httpd = http.server.HTTPServer(server_address, Shortener)
    httpd.serve_forever()
| [
"[email protected]"
] | |
759f2892a4b03efd81ece2f4d33a6eba2ba16139 | 5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5 | /blimgui/dist/OpenGL/raw/GLX/MESA/query_renderer.py | 072891b41d9ef525470951c92ec96a668f34048f | [
"MIT"
] | permissive | juso40/bl2sdk_Mods | 8422a37ca9c2c2bbf231a2399cbcb84379b7e848 | 29f79c41cfb49ea5b1dd1bec559795727e868558 | refs/heads/master | 2023-08-15T02:28:38.142874 | 2023-07-22T21:48:01 | 2023-07-22T21:48:01 | 188,486,371 | 42 | 110 | MIT | 2022-11-20T09:47:56 | 2019-05-24T20:55:10 | Python | UTF-8 | Python | false | false | 2,034 | py | '''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLX import _types as _cs
# End users want this...
from OpenGL.raw.GLX._types import *
from OpenGL.raw.GLX import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLX_MESA_query_renderer'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLX,'GLX_MESA_query_renderer',error_checker=_errors._error_checker)
GLX_RENDERER_ACCELERATED_MESA=_C('GLX_RENDERER_ACCELERATED_MESA',0x8186)
GLX_RENDERER_DEVICE_ID_MESA=_C('GLX_RENDERER_DEVICE_ID_MESA',0x8184)
GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_COMPATIBILITY_PROFILE_VERSION_MESA',0x818B)
GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_CORE_PROFILE_VERSION_MESA',0x818A)
GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES2_PROFILE_VERSION_MESA',0x818D)
GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA=_C('GLX_RENDERER_OPENGL_ES_PROFILE_VERSION_MESA',0x818C)
GLX_RENDERER_PREFERRED_PROFILE_MESA=_C('GLX_RENDERER_PREFERRED_PROFILE_MESA',0x8189)
GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA=_C('GLX_RENDERER_UNIFIED_MEMORY_ARCHITECTURE_MESA',0x8188)
GLX_RENDERER_VENDOR_ID_MESA=_C('GLX_RENDERER_VENDOR_ID_MESA',0x8183)
GLX_RENDERER_VERSION_MESA=_C('GLX_RENDERER_VERSION_MESA',0x8185)
GLX_RENDERER_VIDEO_MEMORY_MESA=_C('GLX_RENDERER_VIDEO_MEMORY_MESA',0x8187)
@_f
@_p.types(_cs.Bool,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryCurrentRendererIntegerMESA(attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,_cs.c_int)
def glXQueryCurrentRendererStringMESA(attribute):pass
@_f
@_p.types(_cs.Bool,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int,ctypes.POINTER(_cs.c_uint))
def glXQueryRendererIntegerMESA(dpy,screen,renderer,attribute,value):pass
@_f
@_p.types(ctypes.c_char_p,ctypes.POINTER(_cs.Display),_cs.c_int,_cs.c_int,_cs.c_int)
def glXQueryRendererStringMESA(dpy,screen,renderer,attribute):pass
| [
"[email protected]"
] | |
a585065a3adc8bc699bf8ba1c78b67358d1ea23c | c99c272181eb43df688cc6af10bfb17659014ab9 | /03_ОOP-Python/01-Defining Classes/02_Exercise/07_GuildSystem/project/venv/Scripts/easy_install-script.py | 479119ad1bbdbfaf2e56c4f7f55eb619444da6c2 | [] | no_license | LachezarKostov/SoftUni | ce89d11a4796c10c8975dc5c090edecac993cb03 | 47559e9f01f7aabd73d84aa175be37140e2d5621 | refs/heads/master | 2023-01-29T20:49:57.196136 | 2020-12-10T12:34:09 | 2020-12-10T12:34:09 | 283,491,508 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 508 | py | #!"C:\Users\dream\Desktop\Python\OP-Python\01-Defining Classes\02_Exercise\07_GuildSystem\project\venv\Scripts\python.exe"
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install'
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install')()
)
| [
"[email protected]"
] | |
a23776a69c1c30f0a065d46cab1f8ca2e0904e26 | 741191d21d1248b0501ca4fcba7c548998e82b3c | /spidermanage/spidertool/sniffertool.py | 3267591c39100a33d9c29eef25f06e2c5b462eba | [] | no_license | CryptoConsultants/toolforspider | 27fd2df948846c2a40908f3f3deea1422bea4410 | eb1c3a362c360852be734d8f296512e02bf3b045 | refs/heads/master | 2021-01-17T07:28:11.671526 | 2016-02-19T09:06:15 | 2016-02-19T09:06:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,427 | py | #!/usr/bin/python
#coding:utf-8
'''
Created on 2015年10月29日
@author: sherwel
'''
import sys
import nmap
import os
import time
import SQLTool
import Sqldatatask
import config
import Sqldata
from numpy.numarray.numerictypes import IsType
import connectpool
import portscantask
import getLocationTool
reload(sys) # Python2.5 初始化后会删除 sys.setdefaultencoding 这个方法,我们需要重新载入
class SniffrtTool(object):
'''
classdocs
'''
def __init__(self):
'''
Constructor
'''
try:
self.nma = nmap.PortScanner() # instantiate nmap.PortScanner object
self.params='-A -Pn -sC -R -v -O '
# self.params='-sV -T4 -O ' #快捷扫描加强版
# self.params='-sS -sU -T4 -A -v' #深入扫描
except nmap.PortScannerError:
print('Nmap not found', sys.exc_info()[0])
except:
print('Unexpected error:', sys.exc_info()[0])
self.config=config.Config
self.sqlTool=Sqldatatask.getObject()
# self.sqlTool=SQLTool.getObject()
self.portscan=portscantask.getObject()
self.getlocationtool=getLocationTool.getObject()
def scaninfo(self,hosts='localhost', port='', arguments='',hignpersmission='0',callback=''):
if callback=='':
callback=self.callback_result
orders=''
if port!='':
orders+=port
else :
orders=None
try:
if hignpersmission=='0':
print '我在这里49'
print hosts,orders,self.params+arguments
acsn_result=self.nma.scan(hosts=hosts,ports= orders,arguments=self.params+arguments)
#acsn_result=self.nma.scan(hosts=hosts,ports= orders,arguments=arguments)
print acsn_result
print '我在这里51'
return callback(acsn_result)
else:
print '我在这里52'
return callback(self.nma.scan(hosts=hosts,ports= orders,arguments=arguments,callback=callback) )
except nmap.PortScannerError,e:
print e
print '我在这里57'
return ''
except:
print('Unexpected error:', sys.exc_info()[0])
print '我在这里62'
return ''
def callback_result(self,scan_result):
print '——————'
tmp=scan_result
for i in tmp['scan'].keys():
host=i
result=''
try:
# result = u"ip地址:%s 主机名:%s ...... %s\n" %(host,tmp['scan'][host].get('hostnames','null'),tmp['scan'][host]['status'].get('state','null'))
# self.sqlTool.connectdb()
# print tmp['scan'][host].get('hostname','null')
# if 'osclass' in tmp['scan'][host].keys():
# result +=u"系统信息 : %s %s %s 准确度:%s \n" % (str(tmp['scan'][host]['osclass'].get('vendor','null')),str(tmp['scan'][host]['osclass'].get('osfamily','null')),str(tmp['scan'][host]['osclass'].get('osgen','null')),str(tmp['scan'][host]['osclass'].get('accuracy','null')))
# print result
temphosts=str(host)
localtime=str(time.strftime("%Y-%m-%d %X", time.localtime()))
self.getlocationtool.add_work([temphosts])
try :
tempvendor=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('vendor','null'))
temposfamily=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('osfamily','null'))
temposgen=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('osgen','null'))
tempaccuracy=str(tmp['scan'][host]['osmatch'][0]['osclass'][0].get('accuracy','null'))
temphostname=''
for i in tmp['scan'][host]['hostnames']:
temphostname+=str(i.get('name','null'))+' '
tempstate=str(tmp['scan'][host]['status'].get('state','null'))
# print temphosts,tempvendor,temposfamily,temposgen,tempaccuracy,localtime
# self.sqlTool.replaceinserttableinfo_byparams(table=self.config.iptable,select_params= ['ip','vendor','osfamily','osgen','accurate','updatetime','hostname','state'],insert_values= [(temphosts,tempvendor,temposfamily,temposgen,tempaccuracy,localtime,temphostname,tempstate)])
sqldatawprk=[]
dic={"table":self.config.iptable,"select_params": ['ip','vendor','osfamily','osgen','accurate','updatetime','hostname','state'],"insert_values": [(temphosts,tempvendor,temposfamily,temposgen,tempaccuracy,localtime,temphostname,tempstate)]}
tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
sqldatawprk.append(tempwprk)
self.sqlTool.add_work(sqldatawprk)
except Exception,e:
print 'nmap system error'+str(e)
if 'tcp' in tmp['scan'][host].keys():
ports = tmp['scan'][host]['tcp'].keys()
for port in ports:
# portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['tcp'][port].get('name',''),tmp['scan'][host]['tcp'][port].get('state',''), tmp['scan'][host]['tcp'][port].get('product',''),tmp['scan'][host]['tcp'][port].get('version',''),tmp['scan'][host]['tcp'][port].get('script',''))
tempport=str(port)
tempportname=str(tmp['scan'][host]['tcp'][port].get('name',''))
tempportstate=str(tmp['scan'][host]['tcp'][port].get('state',''))
tempproduct=str(tmp['scan'][host]['tcp'][port].get('product',''))
tempportversion=str(tmp['scan'][host]['tcp'][port].get('version',''))
tempscript=str(tmp['scan'][host]['tcp'][port].get('script',''))
# self.sqlTool.replaceinserttableinfo_byparams(table=self.config.porttable,select_params= ['ip','port','timesearch','state','name','product','version','script'],insert_values= [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)])
sqldatawprk=[]
dic={"table":self.config.porttable,"select_params": ['ip','port','timesearch','state','name','product','version','script'],"insert_values": [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)]}
tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
sqldatawprk.append(tempwprk)
self.sqlTool.add_work(sqldatawprk)
self.portscan.add_work([(tempportname,temphosts,tempport,tempportstate)])
elif 'udp' in tmp['scan'][host].keys():
ports = tmp['scan'][host]['udp'].keys()
for port in ports:
# portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['udp'][port].get('name',''),tmp['scan'][host]['udp'][port].get('state',''), tmp['scan'][host]['udp'][port].get('product',''),tmp['scan'][host]['udp'][port].get('version',''),tmp['scan'][host]['udp'][port].get('script',''))
# result = result + portinfo
tempport=str(port)
tempportname=str(tmp['scan'][host]['udp'][port].get('name',''))
tempportstate=str(tmp['scan'][host]['udp'][port].get('state',''))
tempproduct=str(tmp['scan'][host]['udp'][port].get('product',''))
tempportversion=str(tmp['scan'][host]['udp'][port].get('version',''))
tempscript=str(tmp['scan'][host]['udp'][port].get('script',''))
# self.sqlTool.replaceinserttableinfo_byparams(table=self.config.porttable,select_params= ['ip','port','timesearch','state','name','product','version','script'],insert_values= [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)])
sqldatawprk=[]
dic={"table":self.config.porttable,"select_params": ['ip','port','timesearch','state','name','product','version','script'],"insert_values": [(temphosts,tempport,localtime,tempportstate,tempportname,tempproduct,tempportversion,tempscript)]}
tempwprk=Sqldata.SqlData('replaceinserttableinfo_byparams',dic)
sqldatawprk.append(tempwprk)
self.sqlTool.add_work(sqldatawprk)
except Exception,e:
print 'nmap error'+str(e)
except IOError,e:
print '错误IOError'+str(e)
except KeyError,e:
print '不存在该信息'+str(e)
finally:
# print result
return str(scan_result)
def scanaddress(self,hosts=[], ports=[],arguments=''):
temp=''
for i in range(len(hosts)):
if len(ports)<=i:
result=self.scaninfo(hosts=hosts[i],arguments=arguments)
if result is None:
pass
else:
temp+=result
else:
result=self.scaninfo(hosts=hosts[i], port=ports[i],arguments=arguments)
if result is None:
pass
else:
temp+=result
return temp
def isrunning(self):
return self.nma.has_host(self.host)
def callback_resultl(host, scan_result):
print '———不触发这个函数———'
tmp=scan_result
result=''
try:
result = u"ip地址:%s 主机名:%s ...... %s\n" %(host,tmp['scan'][host]['hostname'],tmp['scan'][host]['status']['state'])
if 'osclass' in tmp['scan'][host].keys():
result +=u"系统信息 : %s %s %s 准确度:%s \n" % (str(tmp['scan'][host]['osclass']['vendor']),str(tmp['scan'][host]['osclass']['osfamily']),str(tmp['scan'][host]['osclass']['osgen']),str(tmp['scan'][host]['osclass']['accuracy']))
if 'tcp' in tmp['scan'][host].keys():
ports = tmp['scan'][host]['tcp'].keys()
for port in ports:
portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['tcp'][port]['name'],tmp['scan'][host]['tcp'][port]['state'], tmp['scan'][host]['tcp'][port]['product'],tmp['scan'][host]['tcp'][port]['version'],tmp['scan'][host]['tcp'][port]['script'])
print portinfo
result+= portinfo
elif 'udp' in tmp['scan'][host].keys():
ports = tmp['scan'][host]['udp'].keys()
for port in ports:
portinfo = " port : %s name:%s state : %s product : %s version :%s script:%s \n" %(port,tmp['scan'][host]['udp'][port]['name'],tmp['scan'][host]['udp'][port]['state'], tmp['scan'][host]['udp'][port]['product'],tmp['scan'][host]['udp'][port]['version'],tmp['scan'][host]['udp'][port]['script'])
result += portinfo
except Exception,e:
print e
except IOError,e:
print '错误IOError'+str(e)
except KeyError,e:
print '不存在该信息'+str(e)
finally:
return result
"""
def callback_resultl(host, scan_result):
print scan_result
print scan_result['scan']
f = open('abc.xml','w+')
f.write(str(scan_result))
f.close()
"""
order=' -P0 -sV -sC -sU -O -v -R -sT '
orderq='-A -P0 -Pn -sC -p '
if __name__ == "__main__":
temp=SniffrtTool()
# hosts=['www.cctv.com','localhost','www.baidu.com']'www.cctv.com' www.vip.com
hosts=['www.cctv.com']
temp.scanaddress(hosts,ports=['80'],arguments='')
# print time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
| [
"[email protected]"
] | |
d518f64f0bbd5273253b9da996adb85350151238 | 730a0291d90bf220d162791287e422bc4225d164 | /samples/StackResult/fsmpy/StackSynchronized.py | ce716b3963340b6d02b4f30ab46f82112d7579f6 | [
"BSD-3-Clause"
] | permissive | jon-jacky/PyModel | 27442d062e615bd0bf1bd16d86ae56cc4d3dc443 | 457ea284ea20703885f8e57fa5c1891051be9b03 | refs/heads/master | 2022-11-02T14:08:47.012661 | 2022-10-16T09:47:53 | 2022-10-16T09:47:53 | 2,034,133 | 75 | 36 | NOASSERTION | 2021-07-11T21:15:08 | 2011-07-12T04:23:02 | Python | UTF-8 | Python | false | false | 1,026 | py |
# pma.py Stack StackOneScenario -m 6 -o StackSynchronized
# 4 states, 6 transitions, 4 accepting states, 0 unsafe states, 0 finished and 0 deadend states
# actions here are just labels, but must be symbols with __name__ attribute
def Push(): pass
def Pop(): pass
# states, key of each state here is its number in graph etc. below
states = {
0 : {'StackOneScenario': 0, 'Stack': {'stack': []}},
1 : {'StackOneScenario': 0, 'Stack': {'stack': [1]}},
2 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1]}},
3 : {'StackOneScenario': 0, 'Stack': {'stack': [1, 1, 1]}},
}
# initial state, accepting states, unsafe states, frontier states, deadend states
initial = 0
accepting = [0, 1, 2, 3]
unsafe = []
frontier = []
finished = []
deadend = []
runstarts = [0]
# finite state machine, list of tuples: (current, (action, args, result), next)
graph = (
(0, (Push, (1,), None), 1),
(1, (Pop, (), 1), 0),
(1, (Push, (1,), None), 2),
(2, (Pop, (), 1), 1),
(2, (Push, (1,), None), 3),
(3, (Pop, (), 1), 2),
)
| [
"[email protected]"
] | |
a914ff8c2d0018797ec75f0eb379efac9c21abef | c0a5ff5f77943a9529512e6b27148f3318ab5264 | /vowels2.py | 9fe9e3f321664f0f5ebedae52821be5fdb7ac104 | [] | no_license | smatthewenglish/head_first_python | b15cc7260fa6607759778ac37d86006f803462a9 | 6e783ce541d5462fb2f84cc901c713fcf5895240 | refs/heads/master | 2023-03-28T14:50:16.857613 | 2021-03-31T16:41:14 | 2021-03-31T16:41:14 | 350,149,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | vowels = ['a', 'e', 'i', 'o', 'u']
#word = "Milliways"
word = input("Provide a word to search for vowels: ")
found = []
for letter in word:
if letter in vowels:
#print(letter)
if letter not in found:
found.append(letter)
for vowel in found:
print(vowel)
| [
"[email protected]"
] | |
bf7cccfc45cdf2461987920d5a0b5fcb107fe227 | 5488617b1b05c436b1f8c8642ea75ca754719f8d | /TW_study/LimitCode/tW_measurment/mlfitNormsToText.py | 7cbfc9933869090ddc8caf40ffac104930662672 | [] | no_license | wenxingfang/TW_Top | fdb1ba136be6ace8fdacaade58cb4ca4fcdc3c9e | 389e76c904d08a59d9141b9b66ec15d2583f8e9a | refs/heads/master | 2021-02-05T06:54:27.908688 | 2020-02-28T13:24:00 | 2020-02-28T13:24:00 | 243,754,087 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,037 | py | import re
from sys import argv, stdout, stderr, exit
# import ROOT with a fix to get batch mode (http://root.cern.ch/phpBB3/viewtopic.php?t=3198)
argv.append( '-b-' )
import ROOT
ROOT.gROOT.SetBatch(True)
argv.remove( '-b-' )
if len(argv) == 0: raise RuntimeError, "Usage: mlfitNormsToText.py [ -u ] mlfit.root";
errors = False
if len(argv) > 2 and argv[1] == "-u":
errors = True
argv[1] = argv[2];
file = ROOT.TFile.Open(argv[1]);
prefit = file.Get("norm_prefit")
fit_s = file.Get("norm_fit_s")
fit_b = file.Get("norm_fit_b")
if prefit == None: stderr.write("Missing fit_s in %s. Did you run MaxLikelihoodFit in a recent-enough version of combine and with --saveNorm?\n" % file);
if fit_s == None: raise RuntimeError, "Missing fit_s in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
if fit_b == None: raise RuntimeError, "Missing fit_b in %s. Did you run MaxLikelihoodFit with --saveNorm?" % file;
iter = fit_s.createIterator()
while True:
norm_s = iter.Next()
if norm_s == None: break;
norm_b = fit_b.find(norm_s.GetName())
norm_p = prefit.find(norm_s.GetName()) if prefit else None
m = re.match(r"(\w+)/(\w+)", norm_s.GetName());
if m == None: m = re.match(r"n_exp_(?:final_)?(?:bin)+(\w+)_proc_(\w+)", norm_s.GetName());
if m == None: raise RuntimeError, "Non-conforming object name %s" % norm_s.GetName()
if norm_b == None: raise RuntimeError, "Missing normalization %s for background fit" % norm_s.GetName()
if prefit and norm_p and errors:
print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_p.getVal(), norm_p.getError(), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
else:
if errors:
print "%-30s %-30s %7.3f +/- %7.3f %7.3f +/- %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_s.getError(), norm_b.getVal(), norm_b.getError())
else:
print "%-30s %-30s %7.3f %7.3f" % (m.group(1), m.group(2), norm_s.getVal(), norm_b.getVal())
| [
"[email protected]"
] | |
234f9d0be069bd885e1b1e25db82bd2eb4e0e97e | d765d19f80a6bfed71685838306f2d91f6a5a7dd | /rdt/rdt21.py | 0c2bba984d0b3daf478d990edda454a24d739487 | [] | no_license | EliasFarhan/CompNet | 1f1f83e6babdb688e1d626117cdb50a642a9d2a9 | c95b36c12a7a0a0d0ac5ecdb41e1b227c3973de0 | refs/heads/master | 2021-07-16T20:33:56.803384 | 2020-09-15T18:54:18 | 2020-09-15T18:54:18 | 210,541,643 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,839 | py | from rdt.base import *
from rdt.rdt20 import ChannelRdt20
class SenderRdt21(Sender):
last_packet = ""
sequence_nmb = 1
msg_lock = threading.Lock()
def send_data(self, data, resend=False):
if not resend:
self.msg_lock.acquire()
self.last_packet = data
text_data = data.encode()
packet = bytearray(len(text_data) + 2)
packet[1] = self.sequence_nmb.to_bytes(8, byteorder='little')[0]
check_sum = 0
for byte in text_data:
check_sum += byte
check_sum += packet[1]
packet[0] = check_sum.to_bytes(8, byteorder="little")[0]
packet[2:len(text_data) + 2] = text_data
self.channel.send_msg(packet)
def receive_response(self, response):
check_sum = 0
for byte in response[0:2]:
check_sum += byte
if check_sum.to_bytes(8, byteorder='little')[0] != response[3]:
print("[Error] Bad response checksum : need to send the last packet again: "+self.last_packet)
self.send_data(self.last_packet, resend=True)
return
if b"ACK" in response:
print("[ACK] Packet went well")
self.sequence_nmb += 1
self.msg_lock.release()
elif b"NAK" in response:
print("[NAK] Need to send packet again")
self.send_data(self.last_packet, resend=True)
else:
print("[Error] Bad response : need to send the last packet again")
self.send_data(self.last_packet, resend=True)
class ReceiverRdt21(Receiver):
    """rdt2.1 receiver: verifies checksum and sequence, answers ACK or NAK."""
    # Sequence number of the last packet delivered upward; a packet that
    # repeats it is acknowledged but not delivered again (duplicate).
    sequence_number = 0

    def receive_data(self, data):
        """Validate a [checksum, seq, payload...] frame; reply ACK or NAK."""
        check_sum = data[0]
        sequence_nmb = data[1]
        text_data = data[2:]
        byte_sum = 0
        response = bytearray(4)
        for byte in text_data:
            byte_sum += byte
        byte_sum += sequence_nmb
        if byte_sum.to_bytes(8, byteorder="little")[0] == check_sum:
            if self.sequence_number != sequence_nmb:
                # New sequence number: deliver the payload upward.
                super().receive_data(text_data)
                self.sequence_number = sequence_nmb
            # NOTE(review): slice-assigning 3 bytes into response[0:2] grows
            # the bytearray to 5 bytes, so the checksum written at index 3
            # only covers the first two letters ('AC'/'NA').  This mirrors
            # what SenderRdt21 verifies, but it is fragile -- confirm before
            # touching the framing on either side.
            response[0:2] = b"ACK"
            byte_sum = 0
            for byte in response[0:2]:
                byte_sum += byte
            response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
            self.send_response(response)
        else:
            response[0:2] = b"NAK"
            byte_sum = 0
            for byte in response[0:2]:
                byte_sum += byte
            response[3] = byte_sum.to_bytes(8, byteorder='little')[0]
            self.send_response(response)

    def send_response(self, response):
        """Forward the ACK/NAK frame through the base-class channel."""
        super().send_response(response)
def main():
    """Wire an rdt2.1 sender/receiver pair to the rdt2.0 channel and run it."""
    simulation = Simulation(
        sender=SenderRdt21(),
        channel=ChannelRdt20(),
        receiver=ReceiverRdt21(),
    )
    simulation.simulate()
# script entry point when the module is executed directly
if __name__ == "__main__":
    main()
| [
"[email protected]"
] | |
901c2725f19e802f0cfd00ad38118bb3d1511a0c | 6490638f15a2dfbe0cec9725186f9784d57c92f0 | /SPACEUI/SPACEgui.py | 18a2f4d304f7717a4741a891fc0f833466a08ac4 | [
"MIT"
] | permissive | khawatkom/SpacePyLibrary | af9c490ef796b9d37a13298c41df1fb5bf6b3cee | c94415e9d85519f345fc56938198ac2537c0c6d0 | refs/heads/master | 2020-05-14T21:52:39.388979 | 2019-04-17T17:06:04 | 2019-04-17T17:06:04 | 181,970,668 | 1 | 0 | null | 2019-04-17T21:26:44 | 2019-04-17T21:26:44 | null | UTF-8 | Python | false | false | 33,911 | py | #******************************************************************************
# (C) 2018, Stefan Korner, Austria *
# *
# The Space Python Library is free software; you can redistribute it and/or *
# modify it under under the terms of the MIT License as published by the *
# Massachusetts Institute of Technology. *
# *
# The Space Python Library is distributed in the hope that it will be useful, *
# but WITHOUT ANY WARRANTY; without even the implied warranty of *
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MIT License *
# for more details. *
#******************************************************************************
# Space Segment Simulation GUI *
#******************************************************************************
import tkinter
from tkinter import filedialog, simpledialog
from UTIL.SYS import Error, LOG, LOG_INFO, LOG_WARNING, LOG_ERROR
import SCOS.ENV
import SPACE.IF
import UI.TKI
import UTIL.TIME
#############
# constants #
#############
# GUI colour scheme (hex RGB strings consumed by the tkinter widgets below)
COLOR_BUTTON_FG = "#FFFFFF"    # menu button foreground (text)
COLOR_BUTTON_BG = "#808080"    # menu button background
COLOR_INITIALISED = "#FFFF00"  # TM status field background: initialised
COLOR_CONNECTED = "#00FF00"    # TM status field background: connected
COLOR_ON_OK = "#00FF00"        # checkbutton colour for positive (ACK) states
COLOR_ON_NOK = "#FF0000"       # checkbutton colour for negative (NAK) states
###########
# classes #
###########
# =============================================================================
class TMpacketDetails(tkinter.Frame, UI.TKI.AppGrid):
    """Displays the packet details, implemented as tkinter.Frame"""
    # ---------------------------------------------------------------------------
    def __init__(self, master):
        """Builds the static layout: packet header fields on the left, the
        parameter listbox on the right, two optional input fields below."""
        tkinter.Frame.__init__(self, master, relief=tkinter.GROOVE, borderwidth=1)
        # --- filler ---
        filler = tkinter.Label(self)
        self.appGrid(filler, row=0, columnspan=2, rowweight=0)
        # packet name
        self.pktNameField = UI.TKI.ValueField(self, row=1, label="Packet name:")
        # packet description
        self.pktDescrField = UI.TKI.ValueField(self, row=2, label="Packet description:")
        # SPID
        self.pktSPIDfield = UI.TKI.ValueField(self, row=3, label="Packet SPID:")
        # APID
        self.pktAPIDfield = UI.TKI.ValueField(self, row=4, label="Packet APID:")
        # Type
        self.pktTypeField = UI.TKI.ValueField(self, row=5, label="Packet Type:")
        # Subtype
        self.pktSubtypeField = UI.TKI.ValueField(self, row=6, label="Packet Subtype:")
        # PI1
        self.pktPI1field = UI.TKI.ValueField(self, row=7, label="Packet PI1:")
        # PI2
        self.pktPI2field = UI.TKI.ValueField(self, row=8, label="Packet PI2:")
        # --- parameter listbox ---
        label = tkinter.Label(self, text="Parameters")
        self.appGrid(label, row=0, column=2, rowweight=0)
        self.parametersListbox = UI.TKI.ScrolledListbox(self, selectmode=tkinter.SINGLE)
        self.appGrid(self.parametersListbox, row=1, column=2, rowspan=8, rowweight=0, columnweight=1)
        # --- filler ---
        filler = tkinter.Label(self)
        self.appGrid(filler, row=9, columnspan=3, rowweight=0)
        # parameter names (optional overwrite for the packet contents)
        self.parameterNamesField = UI.TKI.InputField(self, row=10, label="Parameter names: optional")
        self.appGrid(self.parameterNamesField.field, row=10, column=1, columnspan=2, rowweight=0)
        # parameter values (optional overwrite for the packet contents)
        self.parameterValuesField = UI.TKI.InputField(self, row=11, label="Parameter value: optional")
        self.appGrid(self.parameterValuesField.field, row=11, column=1, columnspan=2, rowweight=0)
        # --- filler ---
        filler = tkinter.Label(self)
        self.appGrid(filler, row=12, columnspan=3, rowweight=0)
    # ---------------------------------------------------------------------------
    def update(self, tmPktDef):
        """Update the packet fields"""
        # fetch the data
        # start with empty display values; only filled when a packet
        # definition is selected (tmPktDef may be None)
        pktName = ""
        pktDescr = ""
        pktSPID = ""
        pktAPID = ""
        pktType = ""
        pktSType = ""
        pktPI1val = ""
        pktPI2val = ""
        tmParamExtractions = []
        if tmPktDef != None:
            pktName = tmPktDef.pktName
            pktDescr = tmPktDef.pktDescr
            pktSPID = tmPktDef.pktSPID
            pktAPID = tmPktDef.pktAPID
            pktType = tmPktDef.pktType
            pktSType = tmPktDef.pktSType
            # PI1/PI2 are optional in the packet definition
            if tmPktDef.pktPI1val != None:
                pktPI1val = tmPktDef.pktPI1val
            if tmPktDef.pktPI2val != None:
                pktPI2val = tmPktDef.pktPI2val
            tmParamExtractions = tmPktDef.getParamExtractions()
        # write the data into the GUI
        self.pktNameField.set(pktName)
        self.pktDescrField.set(pktDescr)
        self.pktSPIDfield.set(pktSPID)
        self.pktAPIDfield.set(pktAPID)
        self.pktTypeField.set(pktType)
        self.pktSubtypeField.set(pktSType)
        self.pktPI1field.set(pktPI1val)
        self.pktPI2field.set(pktPI2val)
        lrow = 0
        self.parametersListbox.list().delete(0, tkinter.END)
        for tmParamExtraction in tmParamExtractions:
            # packet identification extractions are not listed as parameters
            if tmParamExtraction.piValue:
                continue
            text = tmParamExtraction.name + " ---> " + tmParamExtraction.descr
            self.parametersListbox.list().insert(lrow, text)
            lrow += 1
# =============================================================================
class TMpacketBrowser(simpledialog.Dialog, UI.TKI.AppGrid):
    """Browser for TM packets"""
    # ---------------------------------------------------------------------------
    def __init__(self, master, title, prompt=""):
        """Read the MIB for obtaining the initialisation data"""
        # initialise the dialog
        self.prompt = prompt
        # last known listbox selection, maintained by the polling loop
        self.listboxCurrent = None
        # id of the pending tkinter after() callback (None if none pending)
        self.afterID = None
        # NOTE: simpledialog.Dialog.__init__ blocks until the dialog is
        # closed; it invokes self.body() which starts the selection polling
        simpledialog.Dialog.__init__(self, master, title=title)
        # dialog is closed now ---> cancel the pending poll callback
        if self.afterID != None:
            self.after_cancel(self.afterID)
    # ---------------------------------------------------------------------------
    def body(self, master):
        """Initialise the dialog contents (called by Dialog.__init__)"""
        row=0
        if self.prompt != "":
            label = tkinter.Label(master, text=self.prompt)
            label.grid(row=row, column=0, columnspan=4)
            row += 1
        label = tkinter.Label(master)
        label.grid(row=row, column=0, columnspan=4)
        row += 1
        # scrolled list box, filled with all TM packet definitions
        self.slistbox = UI.TKI.ScrolledListbox(master, selectmode=tkinter.SINGLE)
        self.appGrid(self.slistbox, row=row, column=0, columnweight=1)
        lrow = 0
        for tmPktDef in SPACE.IF.s_definitions.getTMpktDefs():
            packetName = tmPktDef.pktName
            self.insertListboxRow(lrow, packetName)
            lrow += 1
        self.pollListbox()
        # details panel on the right side
        self.details = TMpacketDetails(master)
        self.appGrid(self.details, row=row, column=1, columnweight=0)
    # ---------------------------------------------------------------------------
    def insertListboxRow(self, row, text):
        """Inserts a row into self.slistbox"""
        self.slistbox.list().insert(row, text)
    # ---------------------------------------------------------------------------
    def listboxHasChanged(self, pos):
        """Callback when the selection of self.slistbox has been changed"""
        if pos != None:
            # display the packet data of the newly selected definition
            tmPktDef = SPACE.IF.s_definitions.getTMpktDefByIndex(pos)
            self.details.update(tmPktDef)
    # ---------------------------------------------------------------------------
    def pollListbox(self):
        """Polls if the selection of self.slistbox has been changed"""
        now = self.slistbox.list().curselection()
        if now != self.listboxCurrent:
            if len(now) > 0:
                self.listboxHasChanged(int(now[0]))
            else:
                self.listboxHasChanged(None)
            self.listboxCurrent = now
        # re-schedule itself: the selection is polled every 250 ms
        self.afterID = self.after(250, self.pollListbox)
    # ---------------------------------------------------------------------------
    def apply(self):
        """Callback when the OK button is pressed"""
        packetName = self.details.pktNameField.get()
        if packetName != "":
            # result layout: [packetName, paramNames, paramValues]
            paramNames = self.details.parameterNamesField.get()
            paramValues = self.details.parameterValuesField.get()
            self.result = [packetName, paramNames, paramValues]
# =============================================================================
class GUIview(UI.TKI.GUItabView):
    """Implementation of the SIM Space GUI layer.

    Provides the "SPACE" tab: menu buttons and checkbuttons for commanding
    the space segment model, status/value fields and a message logger.
    The handlers for the four TC acknowledgement stages (1...4) share
    identical logic and are implemented via private helpers parametrized
    with the stage number; the public per-stage methods are kept because
    they are referenced by name from the menu/checkbutton definitions.
    """
    # ---------------------------------------------------------------------------
    def __init__(self, master):
        """Initialise all GUI elements"""
        UI.TKI.GUItabView.__init__(self, master, "SPACE", "Space Segment")
        # menu buttons (PKT/SND/ACK/RPLY start disabled until TM is connected)
        self.menuButtons = UI.TKI.MenuButtons(self,
            [["PKT", self.setPacketDataCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
             ["SND", self.sendPacketCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
             ["ACK", self.sendAckCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
             ["RPLY", self.replayPacketsCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG, tkinter.DISABLED],
             ["LIST", self.listPacketsCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG],
             ["GEN", self.generateCallback, COLOR_BUTTON_FG, COLOR_BUTTON_BG]])
        self.appGrid(self.menuButtons,
                     row=0,
                     columnspan=2,
                     rowweight=0,
                     sticky=tkinter.EW)
        # checkbuttons for cyclic TM and the 4 TC acknowledgement stages
        self.checkButtons = UI.TKI.Checkbuttons(self,
            [["TM", self.cyclicCallback, False, COLOR_ON_OK],
             ["ACK1", self.ack1Callback, True, COLOR_ON_OK],
             ["NAK1", self.nak1Callback, False, COLOR_ON_NOK],
             ["ACK2", self.ack2Callback, True, COLOR_ON_OK],
             ["NAK2", self.nak2Callback, False, COLOR_ON_NOK],
             ["ACK3", self.ack3Callback, True, COLOR_ON_OK],
             ["NAK3", self.nak3Callback, False, COLOR_ON_NOK],
             ["ACK4", self.ack4Callback, True, COLOR_ON_OK],
             ["NAK4", self.nak4Callback, False, COLOR_ON_NOK]])
        self.appGrid(self.checkButtons,
                     row=1,
                     columnspan=2,
                     rowweight=0,
                     columnweight=0,
                     sticky=tkinter.W)
        # tm status
        self.tmStatusField = UI.TKI.ValueField(self, row=2, label="TM status:")
        self.tmStatusField.set("INIT")
        self.tmStatusField.setBackground(COLOR_INITIALISED)
        # packet
        self.packetField = UI.TKI.ValueField(self, row=3, label="Packet:")
        # SPID
        self.spidField = UI.TKI.ValueField(self, row=4, label="SPID:")
        # parameter values
        self.parameterValuesField = UI.TKI.ValueField(self, row=5, label="Parameters and values:")
        # replay TM packets
        self.replayTMpacketsField = UI.TKI.ValueField(self, row=6, label="Replay TM packets:")
        # log messages
        self.messageLogger = UI.TKI.MessageLogger(self, "SPACE")
        self.appGrid(self.messageLogger, row=7, columnspan=2)
        # message line
        self.messageline = tkinter.Message(self, relief=tkinter.GROOVE)
        self.appGrid(self.messageline,
                     row=8,
                     columnspan=2,
                     rowweight=0,
                     columnweight=0,
                     sticky=tkinter.EW)
        self.grid(row=0, column=0, sticky=tkinter.EW+tkinter.NS)
        self.master.rowconfigure(0, weight=1)
        self.master.columnconfigure(0, weight=1)
    # ---------------------------------------------------------------------------
    def fillCommandMenuItems(self):
        """
        fill the command menu bar,
        implementation of UI.TKI.GUItabView.fillCommandMenuItems
        """
        self.addCommandMenuItem(label="SetPacketData", command=self.setPacketDataCallback, enabled=False)
        self.addCommandMenuItem(label="SendPacket", command=self.sendPacketCallback, enabled=False)
        self.addCommandMenuItem(label="EnableCyclic", command=self.enableCyclicCallback)
        self.addCommandMenuItem(label="DisableCyclic", command=self.disableCyclicCallback, enabled=False)
        # one enableAck/enableNak/disableAck triplet per acknowledgement stage
        for stage in range(1, 5):
            self.addCommandMenuItem(label="OBCenableAck%d" % stage,
                                    command=getattr(self, "obcEnableAck%dCallback" % stage),
                                    enabled=False)
            self.addCommandMenuItem(label="OBCenableNak%d" % stage,
                                    command=getattr(self, "obcEnableNak%dCallback" % stage))
            self.addCommandMenuItem(label="OBCdisableAck%d" % stage,
                                    command=getattr(self, "obcDisableAck%dCallback" % stage))
        self.addCommandMenuItem(label="SendAck", command=self.sendAckCallback, enabled=False)
        self.addCommandMenuItem(label="ReplayPackets", command=self.replayPacketsCallback, enabled=False)
        self.addCommandMenuItem(label="ListPackets", command=self.listPacketsCallback)
        self.addCommandMenuItem(label="Generate", command=self.generateCallback)
    # ---------------------------------------------------------------------------
    def setPacketDataCallback(self):
        """Called when the SetPacketData menu entry is selected"""
        # do the dialog
        dialog = TMpacketBrowser(self,
                                 title="Set Packet Data Dialog",
                                 prompt="Please select a packet and enter virtual channel and parameter name/values.")
        if dialog.result != None:
            packetName, paramNames, paramValues = dialog.result
            if paramNames == "" or paramValues == "":
                # no parameter overwrites entered
                self.notifyModelTask(["SETPACKETDATA", packetName])
            else:
                self.notifyModelTask(["SETPACKETDATA", packetName, paramNames, paramValues])
    # ---------------------------------------------------------------------------
    def sendPacketCallback(self):
        """Called when the SendPacket menu entry is selected"""
        self.notifyModelTask(["SENDPACKET"])
    # ---------------------------------------------------------------------------
    def enableCyclicCallback(self):
        """Called when the EnableCyclic menu entry is selected"""
        self.notifyModelTask(["ENABLECYCLIC"])
    def disableCyclicCallback(self):
        """Called when the DisableCyclic menu entry is selected"""
        self.notifyModelTask(["DISABLECYCLIC"])
    def cyclicCallback(self):
        """Called when the TM checkbutton is pressed"""
        if self.checkButtons.getButtonPressed("TM"):
            self.notifyModelTask(["ENABLECYCLIC"])
        else:
            self.notifyModelTask(["DISABLECYCLIC"])
    # ---------------------------------------------------------------------------
    # common handlers for the TC acknowledgement stages (1...4)
    def _obcEnableAckCallback(self, stage):
        """Common handler for the OBCenableAck<stage> menu entries"""
        self.notifyModelTask(["OBCENABLEACK%d" % stage])
    def _obcEnableNakCallback(self, stage):
        """Common handler for the OBCenableNak<stage> menu entries"""
        self.notifyModelTask(["OBCENABLENAK%d" % stage])
    def _obcDisableAckCallback(self, stage):
        """Common handler for the OBCdisableAck<stage> menu entries"""
        self.notifyModelTask(["OBCDISABLEACK%d" % stage])
    def _ackCallback(self, stage):
        """Common handler for the ACK<stage> checkbuttons"""
        if self.checkButtons.getButtonPressed("ACK%d" % stage):
            self.notifyModelTask(["OBCENABLEACK%d" % stage])
        else:
            self.notifyModelTask(["OBCDISABLEACK%d" % stage])
    def _nakCallback(self, stage):
        """Common handler for the NAK<stage> checkbuttons"""
        # note: releasing a NAK checkbutton disables the whole stage
        # (OBCDISABLEACK<stage>), there is no separate disable-NAK command
        if self.checkButtons.getButtonPressed("NAK%d" % stage):
            self.notifyModelTask(["OBCENABLENAK%d" % stage])
        else:
            self.notifyModelTask(["OBCDISABLEACK%d" % stage])
    # --- stage 1 ---
    def obcEnableAck1Callback(self):
        """Called when the OBCenableAck1 menu entry is selected"""
        self._obcEnableAckCallback(1)
    def obcEnableNak1Callback(self):
        """Called when the OBCenableNak1 menu entry is selected"""
        self._obcEnableNakCallback(1)
    def obcDisableAck1Callback(self):
        """Called when the OBCdisableAck1 menu entry is selected"""
        self._obcDisableAckCallback(1)
    def ack1Callback(self):
        """Called when the ACK1 checkbutton is pressed"""
        self._ackCallback(1)
    def nak1Callback(self):
        """Called when the NAK1 checkbutton is pressed"""
        self._nakCallback(1)
    # --- stage 2 ---
    def obcEnableAck2Callback(self):
        """Called when the OBCenableAck2 menu entry is selected"""
        self._obcEnableAckCallback(2)
    def obcEnableNak2Callback(self):
        """Called when the OBCenableNak2 menu entry is selected"""
        self._obcEnableNakCallback(2)
    def obcDisableAck2Callback(self):
        """Called when the OBCdisableAck2 menu entry is selected"""
        self._obcDisableAckCallback(2)
    def ack2Callback(self):
        """Called when the ACK2 checkbutton is pressed"""
        self._ackCallback(2)
    def nak2Callback(self):
        """Called when the NAK2 checkbutton is pressed"""
        self._nakCallback(2)
    # --- stage 3 ---
    def obcEnableAck3Callback(self):
        """Called when the OBCenableAck3 menu entry is selected"""
        self._obcEnableAckCallback(3)
    def obcEnableNak3Callback(self):
        """Called when the OBCenableNak3 menu entry is selected"""
        self._obcEnableNakCallback(3)
    def obcDisableAck3Callback(self):
        """Called when the OBCdisableAck3 menu entry is selected"""
        self._obcDisableAckCallback(3)
    def ack3Callback(self):
        """Called when the ACK3 checkbutton is pressed"""
        self._ackCallback(3)
    def nak3Callback(self):
        """Called when the NAK3 checkbutton is pressed"""
        self._nakCallback(3)
    # --- stage 4 ---
    def obcEnableAck4Callback(self):
        """Called when the OBCenableAck4 menu entry is selected"""
        self._obcEnableAckCallback(4)
    def obcEnableNak4Callback(self):
        """Called when the OBCenableNak4 menu entry is selected"""
        self._obcEnableNakCallback(4)
    def obcDisableAck4Callback(self):
        """Called when the OBCdisableAck4 menu entry is selected"""
        self._obcDisableAckCallback(4)
    def ack4Callback(self):
        """Called when the ACK4 checkbutton is pressed"""
        self._ackCallback(4)
    def nak4Callback(self):
        """Called when the NAK4 checkbutton is pressed"""
        self._nakCallback(4)
    # ---------------------------------------------------------------------------
    def sendAckCallback(self):
        """Called when the SendAck menu entry is selected"""
        dialog = UI.TKI.InputDialog(master=self,
                                    title="TC Acknowledgement",
                                    prompt="Enter data for TC Acknowledgement Report (PUS service 1)",
                                    fieldsSpec = [["InputField", "TC APID:"],
                                                  ["InputField", "TC SSC:"],
                                                  ["Radiobuttons", "Subtype 1 - Accept Success:|" +
                                                                   "Subtype 2 - Accept Fail:|" +
                                                                   "Subtype 3 - Exec Start Success:|" +
                                                                   "Subtype 4 - Exec Start Fail:|" +
                                                                   "Subtype 5 - Exec Proceed Success:|" +
                                                                   "Subtype 6 - Exec Proceed Fail:|" +
                                                                   "Subtype 7 - Exec Finish Success:|" +
                                                                   "Subtype 8 - Exec Finish Fail:"]])
        if dialog.result != None:
            apidStr = dialog.result[0]
            sscStr = dialog.result[1]
            # radiobutton index 0...7 maps to PUS service 1 subtype 1...8
            subtypeStr = str(dialog.result[2] + 1)
            self.notifyModelTask(["SENDACK", apidStr, sscStr, subtypeStr])
    # ---------------------------------------------------------------------------
    def replayPacketsCallback(self):
        """Called when the ReplayPackets menu entry is selected"""
        fileName = filedialog.askopenfilename(title="Open TM Packet Replay File",
                                              initialdir=SCOS.ENV.s_environment.tmFilesDir())
        if fileName != "" and fileName != ():
            self.notifyModelTask(["REPLAYPACKETS", fileName])
    # ---------------------------------------------------------------------------
    def listPacketsCallback(self):
        """Called when the ListPackets menu entry is selected"""
        # disable the button during generation,
        # because generation could take some time
        self.menuButtons.setState("LIST", tkinter.DISABLED)
        self.notifyModelTask(["LISTPACKETS"])
        self.menuButtons.setState("LIST", tkinter.NORMAL)
    # ---------------------------------------------------------------------------
    def generateCallback(self):
        """Called when the Generate menu entry is selected"""
        # disable the button during generation,
        # because generation could take some time
        self.menuButtons.setState("GEN", tkinter.DISABLED)
        self.notifyModelTask(["GENERATE"])
        self.menuButtons.setState("GEN", tkinter.NORMAL)
    # ---------------------------------------------------------------------------
    def notifyStatus(self, status):
        """Generic callback when something changes in the model"""
        if status == "TM_CONNECTED":
            self.tmConnectedNotify()
        elif status == "TM_RECORDING":
            # NOTE(review): tmRecordingNotify is not defined in this class;
            # presumably inherited from UI.TKI.GUItabView - TODO confirm
            self.tmRecordingNotify()
        elif status == "PACKETDATA_SET":
            self.packetDataSetNotify()
        elif status == "UPDATE_REPLAY":
            self.updateReplayNotify()
        elif status == "ENABLED_CYCLIC":
            self.enabledCyclicNotify()
        elif status == "DISABLED_CYCLIC":
            self.disabledCyclicNotify()
        elif status == "OBC_ENABLED_ACK1":
            self.obcEnabledAck1Notify()
        elif status == "OBC_ENABLED_NAK1":
            self.obcEnabledNak1Notify()
        elif status == "OBC_DISABLED_ACK1":
            self.obcDisabledAck1Notify()
        elif status == "OBC_ENABLED_ACK2":
            self.obcEnabledAck2Notify()
        elif status == "OBC_ENABLED_NAK2":
            self.obcEnabledNak2Notify()
        elif status == "OBC_DISABLED_ACK2":
            self.obcDisabledAck2Notify()
        elif status == "OBC_ENABLED_ACK3":
            self.obcEnabledAck3Notify()
        elif status == "OBC_ENABLED_NAK3":
            self.obcEnabledNak3Notify()
        elif status == "OBC_DISABLED_ACK3":
            self.obcDisabledAck3Notify()
        elif status == "OBC_ENABLED_ACK4":
            self.obcEnabledAck4Notify()
        elif status == "OBC_ENABLED_NAK4":
            self.obcEnabledNak4Notify()
        elif status == "OBC_DISABLED_ACK4":
            self.obcDisabledAck4Notify()
        elif status == "FRAME_REC_STARTED":
            self.frameRecStarted()
        elif status == "FRAME_REC_STOPPED":
            self.frameRecStopped()
    # ---------------------------------------------------------------------------
    def _setTMcommandsState(self, enabled):
        """Enables or disables the TM related menu entries and buttons"""
        if enabled:
            changeCommandMenuItem = self.enableCommandMenuItem
            buttonState = tkinter.NORMAL
        else:
            changeCommandMenuItem = self.disableCommandMenuItem
            buttonState = tkinter.DISABLED
        changeCommandMenuItem("SetPacketData")
        changeCommandMenuItem("EnableCyclic")
        changeCommandMenuItem("SendAck")
        changeCommandMenuItem("ReplayPackets")
        self.menuButtons.setState("PKT", buttonState)
        self.menuButtons.setState("ACK", buttonState)
        self.menuButtons.setState("RPLY", buttonState)
    def tmConnectedNotify(self):
        """Called when the TM connect function is successfully processed"""
        self._setTMcommandsState(True)
        self.updateTMstatusField()
    # ---------------------------------------------------------------------------
    def packetDataSetNotify(self):
        """Called when the setPacketData function is successfully processed"""
        self.enableCommandMenuItem("SendPacket")
        self.menuButtons.setState("SND", tkinter.NORMAL)
        self.updateTMstatusField()
        self.packetField.set(SPACE.IF.s_configuration.tmPacketData.pktName)
        self.spidField.set(SPACE.IF.s_configuration.tmPacketData.pktSPID)
        # render the parameter overwrites as "name=value, name=value, ..."
        nameValueStr = ""
        for nameValue in SPACE.IF.s_configuration.tmPacketData.parameterValuesList:
            if nameValueStr != "":
                nameValueStr += ", "
            nameValueStr += nameValue[0] + "=" + nameValue[1]
        self.parameterValuesField.set(nameValueStr)
    # ---------------------------------------------------------------------------
    def _replayItemText(self, item):
        """Returns the display text for a single replay queue item"""
        itemType, itemVal = item
        if itemType == SPACE.IF.RPLY_PKT:
            return itemVal.pktName
        if itemType == SPACE.IF.RPLY_RAWPKT:
            return "raw"
        if itemType == SPACE.IF.RPLY_SLEEP:
            return "sleep(" + str(itemVal) + ")"
        if itemType == SPACE.IF.RPLY_OBT:
            return "obt(" + UTIL.TIME.getASDtimeStr(itemVal) + ")"
        return "ert(" + UTIL.TIME.getASDtimeStr(itemVal) + ")"
    def updateReplayNotify(self):
        """Called when the replay state has changed"""
        replayItems = SPACE.IF.s_tmPacketReplayer.getItems()
        nrPackets = len(replayItems)
        if nrPackets == 0:
            txt = ""
        else:
            # show at most the first 3 queue entries
            txt = str(nrPackets) + ": " + \
                  ", ".join(self._replayItemText(item) for item in replayItems[:3])
            if nrPackets > 3:
                txt += ", ..."
        self.replayTMpacketsField.set(txt)
    # ---------------------------------------------------------------------------
    def enabledCyclicNotify(self):
        """Called when the enableCyclic function is successfully processed"""
        self.disableCommandMenuItem("EnableCyclic")
        self.enableCommandMenuItem("DisableCyclic")
        self.checkButtons.setButtonPressed("TM", True)
    def disabledCyclicNotify(self):
        """Called when the disableCyclic function is successfully processed"""
        self.enableCommandMenuItem("EnableCyclic")
        self.disableCommandMenuItem("DisableCyclic")
        self.checkButtons.setButtonPressed("TM", False)
    # ---------------------------------------------------------------------------
    # common notification handlers for the TC acknowledgement stages;
    # stage-parametrized versions fix a former copy&paste error where the
    # stage 2 ack-enabled handler re-enabled the "OBCenableNak1" menu entry
    # instead of "OBCenableNak2"
    def _obcEnabledAckNotify(self, stage):
        """Called when an obcEnabledAck<stage> function is successfully processed"""
        self.disableCommandMenuItem("OBCenableAck%d" % stage)
        self.enableCommandMenuItem("OBCenableNak%d" % stage)
        self.enableCommandMenuItem("OBCdisableAck%d" % stage)
        self.checkButtons.setButtonPressed("ACK%d" % stage, True)
        self.checkButtons.setButtonPressed("NAK%d" % stage, False)
    def _obcEnabledNakNotify(self, stage):
        """Called when an obcEnabledNak<stage> function is successfully processed"""
        self.enableCommandMenuItem("OBCenableAck%d" % stage)
        self.disableCommandMenuItem("OBCenableNak%d" % stage)
        self.enableCommandMenuItem("OBCdisableAck%d" % stage)
        self.checkButtons.setButtonPressed("ACK%d" % stage, False)
        self.checkButtons.setButtonPressed("NAK%d" % stage, True)
    def _obcDisabledAckNotify(self, stage):
        """Called when an obcDisabledAck<stage> function is successfully processed"""
        self.enableCommandMenuItem("OBCenableAck%d" % stage)
        self.enableCommandMenuItem("OBCenableNak%d" % stage)
        self.disableCommandMenuItem("OBCdisableAck%d" % stage)
        self.checkButtons.setButtonPressed("ACK%d" % stage, False)
        self.checkButtons.setButtonPressed("NAK%d" % stage, False)
    # --- stage 1 ---
    def obcEnabledAck1Notify(self):
        """Called when the obcEnabledAck1 function is successfully processed"""
        self._obcEnabledAckNotify(1)
    def obcEnabledNak1Notify(self):
        """Called when the obcEnabledNak1 function is successfully processed"""
        self._obcEnabledNakNotify(1)
    def obcDisabledAck1Notify(self):
        """Called when the obcDisabledAck1 function is successfully processed"""
        self._obcDisabledAckNotify(1)
    # --- stage 2 ---
    def obcEnabledAck2Notify(self):
        """Called when the obcEnabledAck2 function is successfully processed"""
        self._obcEnabledAckNotify(2)
    def obcEnabledNak2Notify(self):
        """Called when the obcEnabledNak2 function is successfully processed"""
        self._obcEnabledNakNotify(2)
    def obcDisabledAck2Notify(self):
        """Called when the obcDisabledAck2 function is successfully processed"""
        self._obcDisabledAckNotify(2)
    # --- stage 3 ---
    def obcEnabledAck3Notify(self):
        """Called when the obcEnabledAck3 function is successfully processed"""
        self._obcEnabledAckNotify(3)
    def obcEnabledNak3Notify(self):
        """Called when the obcEnabledNak3 function is successfully processed"""
        self._obcEnabledNakNotify(3)
    def obcDisabledAck3Notify(self):
        """Called when the obcDisabledAck3 function is successfully processed"""
        self._obcDisabledAckNotify(3)
    # --- stage 4 ---
    def obcEnabledAck4Notify(self):
        """Called when the obcEnabledAck4 function is successfully processed"""
        self._obcEnabledAckNotify(4)
    def obcEnabledNak4Notify(self):
        """Called when the obcEnabledNak4 function is successfully processed"""
        self._obcEnabledNakNotify(4)
    def obcDisabledAck4Notify(self):
        """Called when the obcDisabledAck4 function is successfully processed"""
        self._obcDisabledAckNotify(4)
    # ---------------------------------------------------------------------------
    def frameRecStarted(self):
        """Called when the recordFrames function is successfully processed"""
        self._setTMcommandsState(True)
    # ---------------------------------------------------------------------------
    def frameRecStopped(self):
        """Called when the stopFrameRecorder function is successfully processed"""
        # TM commands shall only stay enabled when TM is still connected
        self._setTMcommandsState(SPACE.IF.s_configuration.connected)
    # ---------------------------------------------------------------------------
    def updateTMstatusField(self):
        """updated the TM status field depending on the SPACE.IF.s_configuration"""
        if SPACE.IF.s_configuration.connected:
            txt = "CONNECTED"
            bgColor = COLOR_CONNECTED
        else:
            txt = "INIT"
        if SPACE.IF.s_configuration.tmPacketData != None:
            txt += " + PKT DEFINED"
        self.tmStatusField.set(txt)
        if SPACE.IF.s_configuration.connected:
            # while not connected the background keeps its previous colour
            self.tmStatusField.setBackground(bgColor)
| [
"[email protected]"
] | |
63a3e633e544e4a017474a3cba78a6c0a93f189b | 17070ea982156a8553c24e2ea3b687fb1dc5544e | /shop/views.py | 02cd002f7c32aecc9a6deff58f0d5b489658af0a | [] | no_license | akiyoko/django-concurrency-sample | 75353fe55e0376e08f2c888b5feb323f9728fc1a | 8b9fd1e04a034cb0d8e6d1915d864b13b1726608 | refs/heads/main | 2023-01-22T10:49:39.375878 | 2020-12-01T05:17:53 | 2020-12-01T05:17:53 | 317,429,697 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,840 | py | import logging
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.db import transaction
# from django.http.response import HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.template.response import TemplateResponse
from django.utils.decorators import method_decorator
from django.views import View
from .models import Book, BookStock, Order
logger = logging.getLogger(__name__)
User = get_user_model()
@method_decorator(transaction.non_atomic_requests, name='dispatch')
class CheckoutView(View):
    """Checkout view for purchasing one book.

    The view is marked ``non_atomic_requests`` so that ``post`` can manage
    its own transactions (pattern "4)" below) instead of relying on a
    request-wide ATOMIC_REQUESTS transaction.
    """

    def get(self, request, *args, **kwargs):
        """Render the checkout page; warn when the book is out of stock."""
        book = get_object_or_404(Book, pk=kwargs['pk'])
        book_stock = get_object_or_404(BookStock, book=book)
        if book_stock.quantity == 0:
            messages.error(request, "在庫がないので購入できません。")
        context = {
            'book': book,
            'book_stock': book_stock,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)

    def post(self, request, *args, **kwargs):
        """Place an order: register it, decrement stock, run payment,
        then mark the order as paid.

        The commented-out block below preserves the sample's earlier
        concurrency patterns 1)/2) (no explicit transaction, with manual
        compensation on payment failure) for reference.
        """
        # # TODO: ログイン状態をシミュレート
        # request.user = User(pk=1)
        book = get_object_or_404(Book, pk=kwargs['pk'])

        # 1) Default (no transaction management)
        # 2) ATOMIC_REQUESTS enabled
        # # ① 注文情報を登録
        # order = Order(
        #     status=Order.STATUS_PAYMENT_PROCESSING,
        #     total_amount=book.price,
        #     ordered_by=request.user,
        # )
        # order.save()
        #
        # # ② 在庫数を確認
        # book_stock = get_object_or_404(BookStock, book=book)
        # # ③ 在庫数を1減らして更新
        # book_stock.quantity -= 1
        # book_stock.save()
        #
        # # 決済処理
        # try:
        #     print('決済処理')
        #     # TODO
        #     # raise Exception("決済処理で例外発生")
        # except Exception as e:
        #     # 在庫を1つ増やして更新
        #     book_stock = get_object_or_404(BookStock, book=book)
        #     book_stock.quantity += 1
        #     book_stock.save()
        #
        #     # 注文情報のステータスを更新
        #     order.status = Order.STATUS_PAYMENT_NG
        #     order.save()
        #
        #     messages.error(request, "決済NGです。")
        #     return TemplateResponse(request, 'shop/checkout_error.html')
        #
        # # ④ 注文情報のステータスを更新
        # order.status = Order.STATUS_PAYMENT_OK
        # order.save()

        # 3) Wrap in transaction.atomic()
        # 4) With ATOMIC_REQUESTS enabled, open transactions manually
        #    inside this specific method.
        # NOTE(review): indentation reconstructed — the payment call is
        # placed between the two transactions (so no DB transaction is
        # held open during the external payment); confirm against the
        # original sample.
        with transaction.atomic():
            # Step 1: register the order in "payment processing" state.
            order = Order(
                status=Order.STATUS_PAYMENT_PROCESSING,
                total_amount=book.price,
                ordered_by=request.user,
            )
            order.save()
            # Step 2: look up the stock row.
            book_stock = get_object_or_404(BookStock, book=book)
            # Step 3: decrement and persist the stock count.
            book_stock.quantity -= 1
            book_stock.save()
        # ...(payment processing)...
        print('決済処理')
        with transaction.atomic():
            # Step 4: mark the order as paid.
            order.status = Order.STATUS_PAYMENT_OK
            order.save()

        messages.info(request, "購入しました。")
        if book_stock.quantity == 0:
            messages.warning(request, "在庫がなくなりました。")
        context = {
            'book': book,
            'book_stock': book_stock,
            'order': order,
        }
        return TemplateResponse(request, 'shop/checkout.html', context)
| [
"[email protected]"
] | |
c7e0e8f56c9b540a6d37dce314d31c36ea920326 | 27e890f900bd4bfb2e66f4eab85bc381cf4d5d3f | /tests/unit/modules/network/onyx/test_onyx_ospf.py | 665633222c74febcc7f196f3e51d0f6b0b91d4fb | [] | no_license | coll-test/notstdlib.moveitallout | eb33a560070bbded5032385d0aea2f3cf60e690b | 0987f099b783c6cf977db9233e1c3d9efcbcb3c7 | refs/heads/master | 2020-12-19T22:28:33.369557 | 2020-01-23T18:51:26 | 2020-01-23T18:51:26 | 235,865,139 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,664 | py | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible_collections.notstdlib.moveitallout.tests.unit.compat.mock import patch
from ansible_collections.notstdlib.moveitallout.plugins.modules import onyx_ospf
from ansible_collections.notstdlib.moveitallout.tests.unit.modules.utils import set_module_args
from ..onyx_module import TestOnyxModule, load_fixture
class TestOnyxOspfModule(TestOnyxModule):
    """Unit tests for the ``onyx_ospf`` module.

    Device-facing entry points are patched so no real switch is needed:
    fixtures supply the "current" OSPF and interface configuration, and
    ``load_config`` is mocked to capture the commands the module would push.
    """

    module = onyx_ospf

    def setUp(self):
        super(TestOnyxOspfModule, self).setUp()
        # When False, load_fixtures() reports an empty device config so the
        # module has to create the OSPF instance from scratch.
        self._ospf_exists = True
        self.mock_get_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_config")
        self.get_config = self.mock_get_config.start()
        self.mock_get_interfaces_config = patch.object(
            onyx_ospf.OnyxOspfModule,
            "_get_ospf_interfaces_config")
        self.get_interfaces_config = self.mock_get_interfaces_config.start()
        self.mock_load_config = patch(
            'ansible_collections.notstdlib.moveitallout.plugins.module_utils.network.onyx.onyx.load_config')
        self.load_config = self.mock_load_config.start()

    def tearDown(self):
        super(TestOnyxOspfModule, self).tearDown()
        self.mock_get_config.stop()
        # BUGFIX: this patcher was started in setUp() but never stopped,
        # leaking the patched method into subsequent test cases.
        self.mock_get_interfaces_config.stop()
        self.mock_load_config.stop()

    def load_fixtures(self, commands=None, transport='cli'):
        """Feed fixture data (or an empty config) to the mocked getters."""
        if self._ospf_exists:
            config_file = 'onyx_ospf_show.cfg'
            self.get_config.return_value = load_fixture(config_file)
            config_file = 'onyx_ospf_interfaces_show.cfg'
            self.get_interfaces_config.return_value = load_fixture(config_file)
        else:
            self.get_config.return_value = None
            self.get_interfaces_config.return_value = None
        self.load_config.return_value = None

    def test_ospf_absent_no_change(self):
        set_module_args(dict(ospf=3, state='absent'))
        self.execute_module(changed=False)

    def test_ospf_present_no_change(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=[interface]))
        self.execute_module(changed=False)

    def test_ospf_present_remove(self):
        set_module_args(dict(ospf=2, state='absent'))
        commands = ['no router ospf 2']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_change_router(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, router_id='10.2.3.5',
                             interfaces=[interface]))
        commands = ['router ospf 2', 'router-id 10.2.3.5', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ospf_remove_router(self):
        interface = dict(name='Loopback 1', area='0.0.0.0')
        set_module_args(dict(ospf=2, interfaces=[interface]))
        commands = ['router ospf 2', 'no router-id', 'exit']
        self.execute_module(changed=True, commands=commands, sort=False)

    def test_ospf_add_interface(self):
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Loopback 2', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['interface loopback 2 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_remove_interface(self):
        set_module_args(dict(ospf=2, router_id='10.2.3.4'))
        commands = ['interface loopback 1 no ip ospf area']
        self.execute_module(changed=True, commands=commands)

    def test_ospf_add(self):
        # Device starts with no OSPF configuration at all.
        self._ospf_exists = False
        interfaces = [dict(name='Loopback 1', area='0.0.0.0'),
                      dict(name='Vlan 210', area='0.0.0.0'),
                      dict(name='Eth1/1', area='0.0.0.0'),
                      dict(name='Po1', area='0.0.0.0')]
        set_module_args(dict(ospf=2, router_id='10.2.3.4',
                             interfaces=interfaces))
        commands = ['router ospf 2', 'router-id 10.2.3.4', 'exit',
                    'interface loopback 1 ip ospf area 0.0.0.0',
                    'interface vlan 210 ip ospf area 0.0.0.0',
                    'interface ethernet 1/1 ip ospf area 0.0.0.0',
                    'interface port-channel 1 ip ospf area 0.0.0.0']
        self.execute_module(changed=True, commands=commands)
| [
"[email protected]"
] | |
317288bb41c5c374236f56788577a76f1c080b9c | 42fe2827d14a82043ade9393beaedf53e22a69f5 | /bebop_ws/devel/.private/bebop_msgs/lib/python2.7/dist-packages/bebop_msgs/msg/_CommonCommonStateCurrentDateChanged.py | 55096047d13f8e60d5b3ab4a3aa26cae99d7e236 | [] | no_license | cjbanks/bebop-software-framework | a3714646545e9d7d71299a365814bc87437f5e14 | 7da1bbdef4e84aa0ed793cfaad9fe133959ebe21 | refs/heads/master | 2023-04-30T17:52:23.255302 | 2020-11-18T18:32:41 | 2020-11-18T18:32:41 | 368,626,051 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,233 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from bebop_msgs/CommonCommonStateCurrentDateChanged.msg. Do not edit."""
import sys
# True on Python 3 interpreters; gates the str/bytes handling in the
# (de)serialization code below.
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class CommonCommonStateCurrentDateChanged(genpy.Message):
    # genpy-generated ROS message: a std_msgs/Header plus the drone's
    # current date as an ISO-8601 string. Wire format is defined by the
    # serialize/deserialize methods below; do not alter the byte layout.
    _md5sum = "7b1c2ad09d95986b33cc46dd275d6aad"
    _type = "bebop_msgs/CommonCommonStateCurrentDateChanged"
    _has_header = True  # flag to mark the presence of a Header object
    _full_text = """# CommonCommonStateCurrentDateChanged
# auto-generated from up stream XML files at
# github.com/Parrot-Developers/libARCommands/tree/master/Xml
# To check upstream commit hash, refer to last_build_info file
# Do not modify this file by hand. Check scripts/meta folder for generator files.
#
# SDK Comment: Date changed.\n Corresponds to the latest date set on the drone.\n\n **Please note that you should not care about this event if you are using the libARController API as this library is handling the connection process for you.**
Header header
# Date with ISO-8601 format
string date
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
    __slots__ = ['header','date']
    _slot_types = ['std_msgs/Header','string']

    def __init__(self, *args, **kwds):
        """
        Constructor. Any message fields that are implicitly/explicitly
        set to None will be assigned a default value. The recommend
        use is keyword arguments as this is more robust to future message
        changes. You cannot mix in-order arguments and keyword arguments.

        The available fields are:
           header,date

        :param args: complete set of field values, in .msg order
        :param kwds: use keyword arguments corresponding to message field names
        to set specific fields.
        """
        if args or kwds:
            super(CommonCommonStateCurrentDateChanged, self).__init__(*args, **kwds)
            # message fields cannot be None, assign default values for those that are
            if self.header is None:
                self.header = std_msgs.msg.Header()
            if self.date is None:
                self.date = ''
        else:
            # No arguments: every field gets its default value.
            self.header = std_msgs.msg.Header()
            self.date = ''

    def _get_types(self):
        """
        internal API method
        """
        return self._slot_types

    def serialize(self, buff):
        """
        serialize message into buffer
        :param buff: buffer, ``StringIO``
        """
        try:
            _x = self
            # Fixed-size prefix: header.seq, stamp.secs, stamp.nsecs (3 x uint32).
            buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            # Length-prefixed UTF-8 string: header.frame_id.
            # (On Python 3 the 'unicode' check is short-circuited away.)
            _x = self.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            # Length-prefixed UTF-8 string: date.
            _x = self.date
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize(self, str):
        """
        unpack serialized message in str into this message instance
        :param str: byte array of serialized message, ``str``
        """
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            # Fixed 12-byte header prefix (3 x uint32).
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
            # Length-prefixed frame_id string.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            # Length-prefixed date string.
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.date = str[start:end].decode('utf-8')
            else:
                self.date = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill

    def serialize_numpy(self, buff, numpy):
        """
        serialize message with numpy array types into buffer
        :param buff: buffer, ``StringIO``
        :param numpy: numpy python module
        """
        # Identical wire format to serialize(); this message has no array
        # fields, so numpy is unused beyond the genpy API contract.
        try:
            _x = self
            buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
            _x = self.header.frame_id
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
            _x = self.date
            length = len(_x)
            if python3 or type(_x) == unicode:
                _x = _x.encode('utf-8')
                length = len(_x)
            buff.write(struct.pack('<I%ss'%length, length, _x))
        except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
        except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))

    def deserialize_numpy(self, str, numpy):
        """
        unpack serialized message in str into this message instance using numpy for array types
        :param str: byte array of serialized message, ``str``
        :param numpy: numpy python module
        """
        # Mirror of deserialize(); kept separate to satisfy the genpy API.
        try:
            if self.header is None:
                self.header = std_msgs.msg.Header()
            end = 0
            _x = self
            start = end
            end += 12
            (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.header.frame_id = str[start:end].decode('utf-8')
            else:
                self.header.frame_id = str[start:end]
            start = end
            end += 4
            (length,) = _struct_I.unpack(str[start:end])
            start = end
            end += length
            if python3:
                self.date = str[start:end].decode('utf-8')
            else:
                self.date = str[start:end]
            return self
        except struct.error as e:
            raise genpy.DeserializationError(e) # most likely buffer underfill
# Shared genpy Struct for a single little-endian uint32 (string length prefixes).
_struct_I = genpy.struct_I
def _get_struct_I():
    """Return the module-level cached uint32 Struct instance."""
    global _struct_I
    return _struct_I
# Lazily-created Struct for three little-endian uint32s
# (header.seq, stamp.secs, stamp.nsecs).
_struct_3I = None


def _get_struct_3I():
    """Return the shared ``Struct("<3I")``, creating it on first use."""
    global _struct_3I
    cached = _struct_3I
    if cached is None:
        cached = struct.Struct("<3I")
        _struct_3I = cached
    return cached
| [
"Chewie_Alex@nder1"
] | Chewie_Alex@nder1 |
73c728462aaa1aeb1ff14b80acd3d67f327d7557 | 106983cf0b8df622f514ecff2bb2fa4c794c9dac | /Misc/OpenCV/camshiftTest.py | 5677142b105f693d0656e9845a8b7bfcaa575dc3 | [] | no_license | michael5486/Senior-Design | 2d9ae521c637abf7c0825f85b32752ad61c62744 | 6b6c78bed5f20582a9753a9c10020c709d6b6e53 | refs/heads/master | 2021-01-19T09:58:35.378164 | 2017-05-26T17:17:13 | 2017-05-26T17:17:13 | 67,556,475 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,367 | py | #!/usr/bin/env python
import cv2.cv as cv
import serial
#ser = serial.Serial("/dev/ttyACM0",9600)
def is_rect_nonzero(r):
    """Return True if rect ``r`` = (x, y, w, h) has positive width and height."""
    _x, _y, w, h = r
    return min(w, h) > 0
class CamShiftDemo:
    """Interactive CAMShift tracker demo (Python 2, legacy cv API).

    Drag a rectangle over the object to seed a hue histogram; the tracker
    then follows it and prints steering hints based on the ellipse centre.
    NOTE(review): assumes the camera frame is 640 px wide (the 320 centre
    threshold in run()) -- confirm.
    """

    def __init__(self):
        # Open the default camera and wire up the preview windows.
        self.capture = cv.CaptureFromCAM(0)
        cv.NamedWindow( "CamShiftDemo", 1 )
        cv.NamedWindow( "Histogram", 1 )
        cv.SetMouseCallback( "CamShiftDemo", self.on_mouse)

        self.drag_start = None      # Set to (x,y) when mouse starts drag
        self.track_window = None    # Set to rect when the mouse drag finishes

        print( "Keys:\n"
            " ESC - quit the program\n"
            " b - switch to/from backprojection view\n"
            "To initialize tracking, drag across the object with the mouse\n" )

    def hue_histogram_as_image(self, hist):
        """ Returns a nice representation of a hue histogram """
        histimg_hsv = cv.CreateImage( (320,200), 8, 3)

        # Log-scale the bins so small counts stay visible, then normalise
        # to 0..255 against the largest bin.
        mybins = cv.CloneMatND(hist.bins)
        cv.Log(mybins, mybins)
        (_, hi, _, _) = cv.MinMaxLoc(mybins)
        cv.ConvertScale(mybins, mybins, 255. / hi)

        w,h = cv.GetSize(histimg_hsv)
        hdims = cv.GetDims(mybins)[0]
        for x in range(w):
            xh = (180 * x) / (w - 1)  # hue sweeps from 0-180 across the image
            val = int(mybins[int(hdims * x / w)] * h / 255)
            # Dim bar above the count, bright bar for the count itself.
            cv.Rectangle( histimg_hsv, (x, 0), (x, h-val), (xh,255,64), -1)
            cv.Rectangle( histimg_hsv, (x, h-val), (x, h), (xh,255,255), -1)

        histimg = cv.CreateImage( (320,200), 8, 3)
        cv.CvtColor(histimg_hsv, histimg, cv.CV_HSV2BGR)
        return histimg

    def on_mouse(self, event, x, y, flags, param):
        """Mouse callback: track the drag rectangle used to seed tracking."""
        if event == cv.CV_EVENT_LBUTTONDOWN:
            self.drag_start = (x, y)
        if event == cv.CV_EVENT_LBUTTONUP:
            # Drag finished: the selection becomes the CAMShift search window.
            self.drag_start = None
            self.track_window = self.selection
        if self.drag_start:
            # Normalise the drag into an (x, y, w, h) rectangle.
            xmin = min(x, self.drag_start[0])
            ymin = min(y, self.drag_start[1])
            xmax = max(x, self.drag_start[0])
            ymax = max(y, self.drag_start[1])
            self.selection = (xmin, ymin, xmax - xmin, ymax - ymin)

    def run(self):
        """Main capture/track loop; blocks until ESC is pressed."""
        hist = cv.CreateHist([180], cv.CV_HIST_ARRAY, [(0,180)], 1 )
        backproject_mode = False
        print "hitting run section"
        x = 0
        while True:
            #print x
            #x = x + 1
            frame = cv.QueryFrame( self.capture )
            cv.Flip(frame, frame, 1)  # mirror for a natural "webcam" view

            # Convert to HSV and keep the hue
            hsv = cv.CreateImage(cv.GetSize(frame), 8, 3)
            cv.CvtColor(frame, hsv, cv.CV_BGR2HSV)
            self.hue = cv.CreateImage(cv.GetSize(frame), 8, 1)
            cv.Split(hsv, self.hue, None, None, None)

            # Compute back projection
            backproject = cv.CreateImage(cv.GetSize(frame), 8, 1)

            # Run the cam-shift
            cv.CalcArrBackProject( [self.hue], backproject, hist )
            if self.track_window and is_rect_nonzero(self.track_window):
                crit = ( cv.CV_TERMCRIT_EPS | cv.CV_TERMCRIT_ITER, 10, 1)
                print self.track_window
                (iters, (area, value, rect), track_box) = cv.CamShift(backproject, self.track_window, crit)
                self.track_window = rect
                print self.track_window
                try:
                    #prints the center x and y value of the tracked ellipse
                    coord = track_box[0]
                    print "center = {}".format(coord)
                    # Steering hints relative to an assumed 640-px-wide frame.
                    if (coord[0] < 320):
                        print "move right"
                        # ser.write("R")
                    elif (coord[0] == 320):
                        print "do nothing"
                    else:
                        print "move left"
                        # ser.write("L")
                except UnboundLocalError:
                    print "track_box is None"

            # If mouse is pressed, highlight the current selected rectangle
            # and recompute the histogram
            if self.drag_start and is_rect_nonzero(self.selection):
                sub = cv.GetSubRect(frame, self.selection)
                save = cv.CloneMat(sub)
                # Darken everything except the selection for visual feedback.
                cv.ConvertScale(frame, frame, 0.5)
                cv.Copy(save, sub)
                x,y,w,h = self.selection
                cv.Rectangle(frame, (x,y), (x+w,y+h), (255,255,255))

                # Re-seed the hue histogram from the selected region.
                sel = cv.GetSubRect(self.hue, self.selection )
                cv.CalcArrHist( [sel], hist, 0)
                (_, max_val, _, _) = cv.GetMinMaxHistValue( hist)
                if max_val != 0:
                    cv.ConvertScale(hist.bins, hist.bins, 255. / max_val)
            elif self.track_window and is_rect_nonzero(self.track_window):
                print track_box
                cv.EllipseBox( frame, track_box, cv.CV_RGB(255,0,0), 3, cv.CV_AA, 0 )

            if not backproject_mode:
                cv.ShowImage( "CamShiftDemo", frame )
            else:
                cv.ShowImage( "CamShiftDemo", backproject)
            cv.ShowImage( "Histogram", self.hue_histogram_as_image(hist))

            c = cv.WaitKey(7) % 0x100
            if c == 27:
                break
            elif c == ord("b"):
                backproject_mode = not backproject_mode
# Script entry point: run the interactive demo, then tear down the windows.
if __name__=="__main__":
    demo = CamShiftDemo()
    demo.run()
    cv.DestroyAllWindows()
| [
"[email protected]"
] | |
47befcf66e46b26472ad8cb956c2fc14284c7c9e | 3794bc772676d34a6794d19eedb41c2d8a7d39c0 | /ge_dqn/monitor.py | 53024700f3b5ca11545565d3ad057f2807cd0141 | [] | no_license | geyang/reinforcement_learning_learning_notes | 3a79af021b6b126e37b09bf1871cfe9852690abe | f862dbf496f7f5d6cb091604dfb808511de5aa9c | refs/heads/master | 2021-08-23T11:32:14.127137 | 2017-12-04T18:28:35 | 2017-12-04T18:28:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 970 | py | from copy import deepcopy
import numpy
def contextify(env):
    """Patch ``env``'s class so instances work as ``with`` context managers.

    ``__enter__`` returns the instance itself and ``__exit__`` calls its
    ``close()``. Note the patch lands on ``type(env)``, so it affects every
    instance of that class, not only ``env``. Returns ``env`` for chaining.
    """
    cls = type(env)

    def _enter(self):
        return self

    def _exit(self, *exc_info):
        return self.close()

    cls.__enter__ = _enter
    cls.__exit__ = _exit
    return env
def monitor(env):
    """Wrap ``env.step`` so that, at episode end, reward statistics are
    merged into the returned ``info``.

    Rewards are accumulated across calls; when ``done`` is True the wrapper
    attaches ``total_reward``, ``average_reward`` and ``timesteps`` to a
    copy of ``info`` (appended for list/tuple infos, merged for dict-like
    infos) and resets the accumulator. Returns ``env`` with its ``step``
    replaced.
    """
    episode_rewards = []
    _step = env.step

    def step(action):
        s, rew, done, info = _step(action)
        episode_rewards.append(rew)
        if not done:
            return s, rew, done, info
        episode_info = dict(
            total_reward=sum(episode_rewards),
            average_reward=numpy.mean(episode_rewards),
            timesteps=len(episode_rewards)
        )
        episode_rewards.clear()
        if type(info) is list:
            info = deepcopy(info) + [episode_info]
        elif type(info) is tuple:
            # BUGFIX: was ``tuple(*deepcopy(info), *episode_info)``, which
            # raises TypeError (tuple() takes one iterable) and would have
            # unpacked only the dict's keys. Append the summary dict as a
            # single element, mirroring the list branch.
            info = tuple(deepcopy(info)) + (episode_info,)
        elif hasattr(info, 'update'):
            info = deepcopy(info)
            info.update(**episode_info)
        return s, rew, done, info

    env.step = step
    return env
| [
"[email protected]"
] | |
815fb3177d93f4c5b3da4d57786399655d7a5e2b | 493a36f1f8606c7ddce8fc7fe49ce4409faf80be | /.history/B073040023/client_20210614185342.py | 411412020365d07802e69305599262f66838a62f | [] | no_license | ZhangRRz/computer_network | f7c3b82e62920bc0881dff923895da8ae60fa653 | 077848a2191fdfe2516798829644c32eaeded11e | refs/heads/main | 2023-05-28T02:18:09.902165 | 2021-06-15T06:28:59 | 2021-06-15T06:28:59 | 376,568,344 | 0 | 0 | null | 2021-06-13T14:48:36 | 2021-06-13T14:48:36 | null | UTF-8 | Python | false | false | 5,078 | py | import socket
import threading
import tcppacket
import struct
from time import sleep
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
# NOTE(review): this module-level socket is shadowed by the per-request
# sockets created inside each init_new_*_req function and appears unused.
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(msg):
    """Send one calculator request (e.g. "calc 2 + 6") to the UDP server,
    print the reply, and ACK each response packet until FIN is set.

    :param msg: request string in the server's "calc <a> <op> <b>" syntax.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    oldmsg = msg
    # BUGFIX: the payload was sent as a str while the sibling request
    # functions send UTF-8 bytes; encode it for consistency. Also removed
    # a stray ``print(data)`` that referenced ``data`` before assignment
    # (NameError on every call).
    payload = msg.encode('utf-8')
    tcp = tcppacket.TCPPacket(data=payload)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port))
    while True:
        data, address = sock.recvfrom(512 * 1024)
        sock.connect(address)
        # Header layout matches the server's '!HHLLBBH' pseudo-TCP header.
        s = struct.calcsize('!HHLLBBH')
        unpackdata = struct.unpack('!HHLLBBH', data[:s])
        result = data[s:].decode('utf-8')
        print(oldmsg, "is", result)
        # Lowest flag bit signals FIN; echo it back in our ACK.
        if unpackdata[5] % 2:
            fin_falg = 1
        else:
            fin_falg = 0
        tcp = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_falg)
        tcp.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        sock.sendto(tcp.raw, address)
        if fin_falg:
            break
def init_new_videoreq_req(i):
    """Request "video 1" from the server, reassemble the stream, and save
    it as ``<i+1>received.mp4``.

    Implements a cumulative-ACK scheme: an ACK is sent after every 3rd
    in-order packet (counter == 3). NOTE(review): if a FIN packet arrives
    before the counter reaches 3, the loop breaks without sending a final
    ACK -- confirm the server tolerates this.
    """
    sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
    msg = "video 1".encode('utf-8')
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port)
    tcp = tcppacket.TCPPacket(data=msg)
    tcp.assemble_tcp_feilds()
    sock.sendto(tcp.raw, (udp_host, udp_port))  # Sending message to UDP server
    recvdata = b''      # reassembled payload bytes
    ack_seq = 0         # next expected server sequence number
    seq = 0             # our own ACK sequence number
    counter = 0         # in-order packets received since last ACK
    while True:
        data, address = sock.recvfrom(512*1024)
        # '!HHLLBBHHH' header: raw[2] = seq, raw[5] = flags, raw[7] = checksum field.
        s = struct.calcsize('!HHLLBBHHH')
        raw = struct.unpack('!HHLLBBHHH', data[:s])
        print("receive packet from ", address,
              "with header", raw)
        if(raw[2] == ack_seq and raw[7] == 0):
            # Expected in-order packet with a clean checksum field.
            recvdata += data[s:]
            if(raw[5] % 2):
                # fin_falg
                fin_flag = 1
            else:
                fin_flag = 0
            ack_seq += 1
            counter += 1
        else:
            # Out-of-order or corrupted packet: force an immediate ACK
            # and abort the transfer.
            print("Receive ERROR packet from ", address)
            fin_flag = 1
            counter = 3
        # --------------------------------------------
        # send ACK (cumulative: every 3rd packet, or on error/FIN)
        if(counter == 3):
            tcp = tcppacket.TCPPacket(
                data=str("ACK").encode('utf-8'),
                seq=seq, ack_seq=ack_seq,
                flags_ack=1,
                flags_fin=fin_flag)
            tcp.assemble_tcp_feilds()
            print("ACK send to (IP,port):", address,
                  "with ack seq: ", ack_seq, " and seq: ", seq)
            sock.sendto(tcp.raw, address)
            if(not fin_flag):
                counter = 0
            seq += 1
        # --------------------------------------------
        print(fin_flag)
        if(fin_flag):
            break
    savename = str(i+1)+"received.mp4"
    f = open(savename, "wb")
    f.write(recvdata)
    f.close()
def init_new_dns_req(i):
    """Send a DNS lookup request ("dns google.com") to the UDP server,
    print each reply, and ACK every response packet until FIN is set.

    :param i: unused index kept for thread-target compatibility.
    """
    # ---------------------
    conn = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    oldmsg = "dns google.com"
    request = tcppacket.TCPPacket(data=oldmsg.encode('utf-8'))
    request.assemble_tcp_feilds()
    conn.sendto(request.raw, (udp_host, udp_port))
    # print("UDP target IP:", udp_host)
    # print("UDP target Port:", udp_port)
    while True:
        data, address = conn.recvfrom(512 * 1024)
        conn.connect(address)
        header_size = struct.calcsize('!HHLLBBH')
        header = struct.unpack('!HHLLBBH', data[:header_size])
        reply = data[header_size:].decode('utf-8')
        print(oldmsg, "is", reply)
        # Lowest flag bit marks FIN; mirror it in our ACK.
        fin_falg = 1 if header[5] % 2 else 0
        ack = tcppacket.TCPPacket(
            data="ACK".encode('utf-8'),
            flags_ack=1,
            flags_fin=fin_falg)
        ack.assemble_tcp_feilds()
        print("ACK send to (IP,port):", address)
        conn.sendto(ack.raw, address)
        if fin_falg:
            break
# ----------------------
# def init_new
# --- Demo driver: exercise the calculator endpoint sequentially. --------
# NOTE(review): ``threads`` is populated only by the commented-out code
# below and is otherwise unused.
threads = []
#Calculation
print("Demo calculation function")
init_new_calc_req("calc 2 + 6")
sleep(0.25)
init_new_calc_req("calc 2 - 6")
sleep(0.25)
init_new_calc_req("calc 2 * 6")
sleep(0.25)
init_new_calc_req("calc 2 / 6")
sleep(0.25)
init_new_calc_req("calc 2 ^ 6")
sleep(0.25)
init_new_calc_req("calc 16 sqrt")
sleep(0.25)
# Threaded variants kept for reference (disabled):
# threads.append(threading.Thread(target = init_new_calc_req, args = (i,)))
# threads[-1].start()
# for i in range(1):
#     threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
#     threads[-1].start()
# for i in range(1):
#     threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
# threads[-1].start() | [
"[email protected]"
] | |
ac61d410d9419c6949dc2e7bb0e4fd3b37e85afe | 2b7efe276d1dfdc70a4b5cd59ae863b7b7a1bd58 | /euler35.py | 24b79c529bb65377213bed68a3834c21df6f4544 | [] | no_license | mckkcm001/euler | 550bbd126e8d9bb5bc7cb854147399060f865cfc | 8cf1db345b05867d47921b01e8c7e4c2df4ee98d | refs/heads/master | 2021-01-01T17:43:28.799946 | 2017-11-07T02:17:34 | 2017-11-07T02:17:34 | 18,375,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import math
# Collected circular primes below one million, seeded with the only even one.
n = [2]
def is_prime(n):
    """Trial-division primality test.

    Returns True for primes. BUGFIX: the original returned True for
    n < 2 (e.g. is_prime(1)); values below 2 are now rejected. The main
    search below never feeds such values through a still-prime rotation
    chain, so the program's output is unchanged.
    """
    if n < 2:
        return False
    if n % 2 == 0 and n > 2:
        return False
    for i in range(3, int(math.sqrt(n)) + 1, 2):
        if n % i == 0:
            return False
    return True
def is_circ(n):
    """Return True if every digit rotation of ``n`` is prime.

    Each step moves the last digit to the front:
        10**(digits-1) * (n % 10) + n // 10
    NOTE(review): when a rotation has a leading zero, the value (and hence
    the digit count) shrinks, so candidates containing a 0 digit are not
    rotated faithfully. Presumably harmless here because such chains hit a
    composite rotation first -- verify before reusing this helper.
    """
    a = n
    for i in range(len(str(n))):
        a = 10**(len(str(a))-1)*(a%10)+ a//10
        if not is_prime(a):
            return False
    return True
# Scan odd candidates below one million (Project Euler 35); even numbers
# other than the seeded 2 cannot be circular primes.
for i in range(3,1000000,2):
    # NOTE: this check is redundant -- i is always odd here, so i % 10
    # can never be 0.
    if i%10 == 0:
        continue
    if is_circ(i):
        n.append(i)
# Count of circular primes found (including the seeded 2).
print(len(n))
| [
"[email protected]"
] | |
320aa009bc8015194f321089be13615ebf99be42 | 8b83d79425985e9c87ff4b641c2dcb6a151f3aa1 | /recipes/templatetags/markdown.py | 75cbd8e9f44234b8d253b147c3548fd001844065 | [] | no_license | akx/pyttipannu | e1366d982bae62a70da24b7da1a93c40efb51217 | 7b02f7d18d594beddb64beb99283c738ca06b8f0 | refs/heads/master | 2021-01-13T09:15:59.086825 | 2016-09-29T13:52:17 | 2016-09-29T13:52:17 | 69,023,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 287 | py | from django.template import Library
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from markdown import markdown as render_markdown
register = Library()


@register.filter
def markdown(s):
    """Template filter: render *s* as Markdown and mark the HTML safe."""
    text = force_text(s)
    html = render_markdown(text)
    return mark_safe(html)
| [
"[email protected]"
] | |
dfa802d2eab75f6143932b9db16d2742cd829829 | 84ee74894d1e6d76281dd1d3b76ee1dcde0d36b5 | /plotting/visualisePhi.py | 1c7e4c4f7e5da898e37f21b143394c229a9fa1a5 | [] | no_license | pyccel/pygyro | e3f13e5679b37a2dfebbd4b10337e6adefea1105 | a8562e3f0dd8fd56159785e655f017bbcae92e51 | refs/heads/master | 2023-03-10T07:43:17.663359 | 2022-08-17T12:06:25 | 2022-08-17T12:06:25 | 170,837,738 | 4 | 3 | null | 2023-01-02T10:09:08 | 2019-02-15T09:27:22 | Python | UTF-8 | Python | false | false | 2,652 | py | import argparse
from mpi4py import MPI
import numpy as np
from pygyro.model.grid import Grid
from pygyro.model.layout import LayoutSwapper, getLayoutHandler
from pygyro.poisson.poisson_solver import DensityFinder, QuasiNeutralitySolver
from pygyro.utilities.grid_plotter import SlicePlotterNd
from pygyro.initialisation.setups import setupCylindricalGrid
from pygyro.diagnostics.norms import l2
# Driver script: set up a cylindrical distribution function, solve the
# quasi-neutrality equation for the electric potential phi, print its
# squared L2 norm, and show an interactive slice plot on rank 0.
parser = argparse.ArgumentParser(
    description='Plot the initial electric potential')  # BUGFIX: typo 'intial'
parser.add_argument('const_filename', type=str,
                    help='The constants file describing the setup')
args = parser.parse_args()

comm = MPI.COMM_WORLD
rank = comm.Get_rank()

# Build the distribution function in the 'v_parallel' layout.
distribFunc, constants, t = setupCylindricalGrid(constantFile=args.const_filename,
                                                 layout='v_parallel',
                                                 comm=comm,
                                                 allocateSaveMemory=True)

nprocs = distribFunc.getLayout(distribFunc.currentLayout).nprocs[:2]

# Layouts (dimension orderings) used by the Poisson/quasi-neutrality solve.
layout_poisson = {'v_parallel_2d': [0, 2, 1],
                  'mode_solve': [1, 2, 0]}
layout_vpar = {'v_parallel_1d': [0, 2, 1]}
layout_poloidal = {'poloidal': [2, 1, 0]}

remapperPhi = LayoutSwapper(comm, [layout_poisson, layout_vpar, layout_poloidal],
                            [nprocs, nprocs[0], nprocs[1]
                             ], distribFunc.eta_grid[:3],
                            'mode_solve')
remapperRho = getLayoutHandler(
    comm, layout_poisson, nprocs, distribFunc.eta_grid[:3])

# Complex-valued grids for the potential and the perturbed density.
phi = Grid(distribFunc.eta_grid[:3], distribFunc.getSpline(slice(0, 3)),
           remapperPhi, 'mode_solve', comm, dtype=np.complex128)
rho = Grid(distribFunc.eta_grid[:3], distribFunc.getSpline(slice(0, 3)),
           remapperRho, 'v_parallel_2d', comm, dtype=np.complex128)

density = DensityFinder(6, distribFunc.getSpline(3),
                        distribFunc.eta_grid, constants)
QNSolver = QuasiNeutralitySolver(distribFunc.eta_grid[:3], 7, distribFunc.getSpline(0),
                                 constants, chi=0)

# rho from the perturbed distribution, then solve mode-by-mode for phi.
distribFunc.setLayout('v_parallel')
density.getPerturbedRho(distribFunc, rho)
QNSolver.getModes(rho)
rho.setLayout('mode_solve')
phi.setLayout('mode_solve')
QNSolver.solveEquation(phi, rho)
phi.setLayout('v_parallel_2d')
rho.setLayout('v_parallel_2d')
QNSolver.findPotential(phi)

# Diagnostic: squared L2 norm of the potential.
norm = l2(distribFunc.eta_grid, remapperPhi.getLayout('v_parallel_2d'))
val = norm.l2NormSquared(phi)
print(val)

plotter = SlicePlotterNd(phi, 0, 1, True, sliderDimensions=[
                         2], sliderNames=['z'])
# Only rank 0 drives the GUI; other ranks service its data requests.
if (rank == 0):
    plotter.show()
else:
    plotter.calculation_complete()
| [
"[email protected]"
] | |
0f59ddf53e19bb9c1f3b0b8ef1a3e04546cc89e4 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/brackets_20200810105706.py | e35d818bc5c2a83d99fa7e410edda4e403b93436 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | def brackets(S):
def brackets(S):
    """Return True if the bracket string ``S`` is properly balanced.

    Completes this unfinished .history draft: the original second loop
    was syntactically invalid (``if i == "(" and stack.pop()`` had no
    body) and the function returned nothing.
    """
    # "{[()()]}"
    pairs = {')': '(', ']': '[', '}': '{'}
    stack = []
    for ch in S:
        if ch in '([{':
            stack.append(ch)
        elif ch in pairs:
            # A closer must match the most recent unmatched opener.
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack


print(brackets("{[()()]}"))
| [
"[email protected]"
] | |
5328be94b6b7b5d34270b3276badb49bfb04b4f1 | d886f41ac037343b6b9652977f753808117e6246 | /Behaviors/FK_Relative_Reverse_01.py | 0a1594dbcc59fe2ead208d9d03c6eabe281422a2 | [] | no_license | TPayneExperience/TrevorPaynes_RigAndAnimSuite | 5e918be2de896fdacf2da039815e85b91cf0d7ed | 18e0482ca6d70277b6455d9a14e6b10406f1553f | refs/heads/master | 2023-09-03T04:14:48.862905 | 2021-11-10T02:50:54 | 2021-11-10T02:50:54 | 275,663,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,254 | py |
import pymel.core as pm
import Abstracts.Abstract_Behavior as absBhv
import Utilities.Rig_Utilities as rigUtil
import Utilities.Logger as log
class FK_Relative_01(absBhv.Abstract_Behavior):
bhvType = 'FK Relative Reverse'
validLimbTypes = (4,) # rigData.LIMB_TYPES
groupType = 'FKR' # LookAt, IKPV...
groupShape = 'Cube_Poly'
groupCount = 1
groupMoveable = False # for moving control pivots
uiOrderIndex = 250
usesJointControls = False
usesLimbControls = True
bakeLosesData = True
duplicateJointGroups = False
def InitLimb(self, limb):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
jointGroups = pm.listConnections(limb.jointGroups)
jointGroup = rigUtil.SortGroups(jointGroups)[-1]
joint = pm.listConnections(jointGroup.joint)[0]
pm.parent(limbGroup, joint)
rigUtil.ResetAttrs(limbGroup)
pm.parent(limbGroup, limb)
def CleanupLimb(self, limb):
log.funcFileDebug()
#============= FOR BEHAVIOR OPERATION ============================
def Setup_ForBhvOp(self, limb):
pass
def Teardown_ForBhvOp(self, limb):
pass
#============= SETUP ============================
def Setup_Rig_Controls(self, limb):
log.funcFileDebug()
limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
limbControl = pm.listConnections(limbGroup.control)[0]
jointGroups = pm.listConnections(limb.jointGroups)
jointGroups = rigUtil.SortGroups(jointGroups)[::-1]
controls = []
# Parent control hierarchy
for i in range(len(jointGroups)-1):
childGroup = jointGroups[i+1]
parentCtr = pm.listConnections(jointGroups[i].control)[0]
pm.parent(childGroup, parentCtr)
controls.append(parentCtr)
# Parent Root Joint group to Control
childGroup = jointGroups[0]
pm.parentConstraint(limbControl, childGroup, mo=1)
# Bind rotations
multNode = pm.createNode('multiplyDivide')
pm.connectAttr(limbControl.rotate, multNode.input1)
scalar = 1.0/max(len(controls)-2, 1)
multNode.input2.set(scalar, scalar, scalar)
for childControl in controls[1:]:
pm.connectAttr(multNode.output, childControl.rotate)
# External
parentControl = rigUtil.GetParentControl(limb)
if parentControl:
pm.parentConstraint(parentControl, limbGroup, mo=1)
def Setup_Constraint_JointsToControls(self, limb):
log.funcFileDebug()
for group in pm.listConnections(limb.jointGroups):
joint = pm.listConnections(group.joint)[0]
control = pm.listConnections(group.control)[0]
pm.parentConstraint(control, joint, mo=1)
    def Setup_Constraint_ControlsToXforms(self, limb,
                xforms, hasPosCst, hasRotCst, hasScaleCst):
        """Constrain the limb's main control to the last of ``xforms``.

        Each constraint type (point / orient / scale) is applied only when
        its corresponding flag is set.  Returns the constrained controls.
        """
        log.funcFileDebug()
        limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
        limbControl = pm.listConnections(limbGroup.control)[0]
        # Only the final xform in the list drives the control.
        xform = xforms[-1]
        if hasPosCst:
            pm.pointConstraint(xform, limbControl, mo=1)
        if hasRotCst:
            pm.orientConstraint(xform, limbControl, mo=1)
        if hasScaleCst:
            pm.scaleConstraint(xform, limbControl)
        return [limbControl]
#============= TEARDOWN ============================
    def Teardown_Rig_Controls(self, limb):
        """Undo Setup_Rig_Controls: remove the mult node, re-parent groups.

        Deletes the rotation-distribution multiplyDivide node, moves the
        joint groups back under the limb, and removes the external parent
        constraint when the limb has a parent.
        """
        log.funcFileDebug()
        limbGroup = rigUtil.GetLimbGroups(limb, self.groupType)[0]
        limbControl = pm.listConnections(limbGroup.control)[0]
        # The rotate plug goes through a unit-conversion node to the
        # multiplyDivide node created during setup.
        conversionNode = pm.listConnections(limbControl.r)[0]
        multNodes = pm.listConnections(conversionNode.output)
        pm.delete(multNodes) # delete mult node
        groups = pm.listConnections(limb.jointGroups)
        groups = rigUtil.SortGroups(groups)[:-1]
        pm.parent(groups, limb)
        if pm.listConnections(limb.limbParent):
            group = rigUtil.GetLimbGroups(limb, self.groupType)[0]
            cst = pm.listRelatives(group, c=1, type='parentConstraint')
            pm.delete(cst)
def Teardown_Constraint_JointsToControls(self, limb):
log.funcFileDebug()
jointGroups = pm.listConnections(limb.jointGroups)
joints = [pm.listConnections(g.joint)[0] for g in jointGroups]
for joint in joints:
cst = pm.listRelatives(joint, c=1, type='parentConstraint')
pm.delete(cst)
def Teardown_Constraint_ControlsToXforms(self, limb):
log.funcFileDebug()
group = rigUtil.GetLimbGroups(limb, self.groupType)[0]
control = pm.listConnections(group.control)[0]
pm.delete(pm.listRelatives(control, c=1, type='constraint'))
#============= EDITABLE UI ============================
    def Setup_Behavior_Limb_UI(self, limb):
        """Build the behavior-editing UI; returns False (no UI for this behavior)."""
        log.funcFileDebug()
        return False
#============= ANIMATION UI ============================
    def Setup_AnimationTools_Limb_UI(self, limb):
        # No animation-tools UI for this behavior.
        return False # return if UI is enabled
# Copyright (c) 2021 Trevor Payne
# See user license in "PayneFreeRigSuite\Data\LicenseAgreement.txt"
| [
"[email protected]"
] | |
10bf94250ae78f7e23d7e6bd2890662625883c6b | 555002c30895a1e2267d05d67d5167275ade3845 | /server/server.py | d2f825a62b33cfc1b7403d77eceaecff86615fcd | [] | no_license | odbite/jkpghack2016 | 159b2938fd8ab7a2a815c664a38c791f2fb440ec | 8b4f5b3ec555f3436f764c2b49927c200ff335a4 | refs/heads/master | 2021-01-10T05:52:52.600618 | 2016-02-27T17:41:07 | 2016-02-27T17:41:07 | 52,673,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 559 | py | from animals import AnimalApi
from flask import Flask, render_template
from flask_restful import Api
import os
# Repository root (two levels above this file); the built client assets
# are expected under client/app/dist.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
static_folder = os.path.join(BASE_DIR, 'client', 'app', 'dist')
print(static_folder)
# Templates come from the (unbuilt) client app directory; static files
# from the built dist directory.
app = Flask(__name__, template_folder='../client/app', static_path='/static', static_folder=static_folder)
api = Api(app)
# REST endpoint exposing the animals resource.
api.add_resource(AnimalApi, '/api/animals')
@app.route("/")
def hello():
    # Serve the single-page-app shell.
    return render_template('index.html')
if __name__ == '__main__':
    app.run(debug=True)
| [
"[email protected]"
] | |
13bdc405645b0b39b6eddec012b2abcc8c01a3de | 78dc15505e17cef3e49410bbadc1bb4812cdbbad | /foiamachine/local/apps/users/utils.py | 824afac2280c58b95134ab546eea5bc2cd47db3c | [
"MIT"
] | permissive | jgillum/foiamachine | 4a7e4ef9fec681341c014dbe7c98bbce79debb4e | 26d3b02870227696cdaab639c39d47b2a7a42ae5 | refs/heads/master | 2020-06-29T11:19:46.232758 | 2019-08-19T02:27:45 | 2019-08-19T02:27:45 | 200,519,075 | 3 | 1 | null | 2019-08-04T16:57:27 | 2019-08-04T16:57:27 | null | UTF-8 | Python | false | false | 54 | py | /home/foiamachine/repo/foiamachine/apps/users/utils.py | [
"[email protected]"
] | |
9f4802a0adb12e9e53c888ddc1d995e8c04f2963 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/web/get_static_site_user_provided_function_app_for_static_site.py | f4136b23143201325f3c527173a8c7c478e1d846 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,754 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetStaticSiteUserProvidedFunctionAppForStaticSiteResult',
'AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult',
'get_static_site_user_provided_function_app_for_static_site',
]
@pulumi.output_type
class GetStaticSiteUserProvidedFunctionAppForStaticSiteResult:
    """
    Static Site User Provided Function App ARM resource.
    """
    def __init__(__self__, created_on=None, function_app_region=None, function_app_resource_id=None, id=None, kind=None, name=None, type=None):
        # Generated constructor: validates each field's type and stores it
        # via pulumi.set so the @pulumi.output_type machinery can read it.
        if created_on and not isinstance(created_on, str):
            raise TypeError("Expected argument 'created_on' to be a str")
        pulumi.set(__self__, "created_on", created_on)
        if function_app_region and not isinstance(function_app_region, str):
            raise TypeError("Expected argument 'function_app_region' to be a str")
        pulumi.set(__self__, "function_app_region", function_app_region)
        if function_app_resource_id and not isinstance(function_app_resource_id, str):
            raise TypeError("Expected argument 'function_app_resource_id' to be a str")
        pulumi.set(__self__, "function_app_resource_id", function_app_resource_id)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if kind and not isinstance(kind, str):
            raise TypeError("Expected argument 'kind' to be a str")
        pulumi.set(__self__, "kind", kind)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="createdOn")
    def created_on(self) -> str:
        """
        The date and time on which the function app was registered with the static site.
        """
        return pulumi.get(self, "created_on")
    @property
    @pulumi.getter(name="functionAppRegion")
    def function_app_region(self) -> Optional[str]:
        """
        The region of the function app registered with the static site
        """
        return pulumi.get(self, "function_app_region")
    @property
    @pulumi.getter(name="functionAppResourceId")
    def function_app_resource_id(self) -> Optional[str]:
        """
        The resource id of the function app registered with the static site
        """
        return pulumi.get(self, "function_app_resource_id")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource Id.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter
    def kind(self) -> Optional[str]:
        """
        Kind of resource.
        """
        return pulumi.get(self, "kind")
    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource Name.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type.
        """
        return pulumi.get(self, "type")
class AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult(GetStaticSiteUserProvidedFunctionAppForStaticSiteResult):
    """Result subclass that can also be ``await``-ed as an already-resolved value."""
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead ``if False: yield`` makes this a generator function
        # (required for __await__) that never actually suspends; awaiting
        # immediately returns a copy of the resolved result.
        if False:
            yield self
        return GetStaticSiteUserProvidedFunctionAppForStaticSiteResult(
            created_on=self.created_on,
            function_app_region=self.function_app_region,
            function_app_resource_id=self.function_app_resource_id,
            id=self.id,
            kind=self.kind,
            name=self.name,
            type=self.type)
def get_static_site_user_provided_function_app_for_static_site(function_app_name: Optional[str] = None,
                                                               name: Optional[str] = None,
                                                               resource_group_name: Optional[str] = None,
                                                               opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult:
    """
    Static Site User Provided Function App ARM resource.
    API Version: 2020-12-01.


    :param str function_app_name: Name of the function app registered with the static site.
    :param str name: Name of the static site.
    :param str resource_group_name: Name of the resource group to which the resource belongs.
    :return: An awaitable result describing the registered function app.
    """
    __args__ = dict()
    __args__['functionAppName'] = function_app_name
    __args__['name'] = name
    __args__['resourceGroupName'] = resource_group_name
    # Fill in default invoke options (including the SDK version) when the
    # caller did not supply any.
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:web:getStaticSiteUserProvidedFunctionAppForStaticSite', __args__, opts=opts, typ=GetStaticSiteUserProvidedFunctionAppForStaticSiteResult).value

    return AwaitableGetStaticSiteUserProvidedFunctionAppForStaticSiteResult(
        created_on=__ret__.created_on,
        function_app_region=__ret__.function_app_region,
        function_app_resource_id=__ret__.function_app_resource_id,
        id=__ret__.id,
        kind=__ret__.kind,
        name=__ret__.name,
        type=__ret__.type)
| [
"[email protected]"
] | |
51f1291b2afb40a6c8d8781e7bc461ba3d058225 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03838/s497427946.py | 53bb3ecb9f9567bfcac1e11d066677c406a3138b | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 302 | py | x, y = map(int, input().split())
cnt = 0
diff = abs(x) - abs(y)
if diff == 0:
if x * y < 0:
cnt += 1
elif diff > 0:
cnt += diff
if x > 0:
cnt += 1
if y > 0:
cnt += 1
else:
cnt += -diff
if x < 0:
cnt += 1
if y < 0:
cnt += 1
print(cnt)
| [
"[email protected]"
] | |
eae409f0cfe112314878b3129c19172958517b96 | d3210868266ce3f0c17d0777c157da82402d3ed7 | /horizon/openstack_dashboard/dashboards/project/instances/tables.py | 8e610ce626c3c1a3077c52d4315550ca1a3ece88 | [
"Apache-2.0"
] | permissive | cauberong099/openstack | 4f0bb1671bf3f2421a756c8b3bfcd7b344e07096 | 4fc261d37d84126d364de50fbc6ca98b8dc8dd39 | refs/heads/master | 2021-01-10T19:44:22.108399 | 2015-03-28T02:46:21 | 2015-03-28T02:46:21 | 33,003,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40,216 | py | # Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core import urlresolvers
from django.http import HttpResponse # noqa
from django import shortcuts
from django import template
from django.template.defaultfilters import title # noqa
from django.utils.http import urlencode
from django.utils.translation import npgettext_lazy
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import conf
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.templatetags import sizeformat
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.access_and_security.floating_ips \
import workflows
from openstack_dashboard.dashboards.project.instances import tabs
from openstack_dashboard.dashboards.project.instances.workflows \
import resize_instance
from openstack_dashboard.dashboards.project.instances.workflows \
import update_instance
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)

# Instance statuses in which most row actions are offered.
ACTIVE_STATES = ("ACTIVE",)
# Statuses in which a volume can be attached.
VOLUME_ATTACH_READY_STATES = ("ACTIVE", "SHUTOFF")
# Statuses in which a snapshot can be created.
SNAPSHOT_READY_STATES = ("ACTIVE", "SHUTOFF", "PAUSED", "SUSPENDED")

# Mapping of nova's numeric "OS-EXT-STS:power_state" codes to labels.
POWER_STATES = {
    0: "NO STATE",
    1: "RUNNING",
    2: "BLOCKED",
    3: "PAUSED",
    4: "SHUTDOWN",
    5: "SHUTOFF",
    6: "CRASHED",
    7: "SUSPENDED",
    8: "FAILED",
    9: "BUILDING",
}

# Indices into the (do, undo) label tuples returned by the Toggle* batch
# actions' action_present/action_past methods below.
PAUSE = 0
UNPAUSE = 1
SUSPEND = 0
RESUME = 1
def is_deleting(instance):
    """Return True if the instance's task state is 'deleting'."""
    state = getattr(instance, "OS-EXT-STS:task_state", None)
    return bool(state) and state.lower() == "deleting"
class TerminateInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that schedules deletion of the selected instances."""
    name = "terminate"
    classes = ("btn-danger",)
    icon = "remove"
    policy_rules = (("compute", "compute:delete"),)
    help_text = _("Terminated instances are not recoverable.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Terminate Instance",
            u"Terminate Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Scheduled termination of Instance",
            u"Scheduled termination of Instances",
            count
        )

    def allowed(self, request, instance=None):
        """Allow terminate action if instance not currently being deleted."""
        return not is_deleting(instance)

    def action(self, request, obj_id):
        # Deletion is asynchronous on the nova side; hence "Scheduled".
        api.nova.server_delete(request, obj_id)
class RebootInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action performing a hard (power-cycle) reboot."""
    name = "reboot"
    classes = ('btn-danger', 'btn-reboot')
    policy_rules = (("compute", "compute:reboot"),)
    help_text = _("Restarted instances will lose any data"
                  " not saved in persistent storage.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Hard Reboot Instance",
            u"Hard Reboot Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Hard Rebooted Instance",
            u"Hard Rebooted Instances",
            count
        )

    def allowed(self, request, instance=None):
        # When no instance is given (table-level check) the action is
        # available; per-row it requires an active or shut-off instance.
        if instance is not None:
            return ((instance.status in ACTIVE_STATES
                     or instance.status == 'SHUTOFF')
                    and not is_deleting(instance))
        else:
            return True

    def action(self, request, obj_id):
        api.nova.server_reboot(request, obj_id, soft_reboot=False)
class SoftRebootInstance(RebootInstance):
    """Variant of RebootInstance that requests a graceful (soft) reboot."""
    name = "soft_reboot"

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Soft Reboot Instance",
            u"Soft Reboot Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Soft Rebooted Instance",
            u"Soft Rebooted Instances",
            count
        )

    def action(self, request, obj_id):
        api.nova.server_reboot(request, obj_id, soft_reboot=True)
class TogglePause(tables.BatchAction):
    """Batch action that pauses running instances or unpauses paused ones.

    Both the displayed label and the policy rule that is checked depend on
    the instance's current PAUSED state, which ``allowed`` records on
    ``self.paused``.
    """
    name = "pause"
    icon = "pause"

    @staticmethod
    def action_present(count):
        return (
            ungettext_lazy(
                u"Pause Instance",
                u"Pause Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )

    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Paused Instance",
                u"Paused Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )

    def allowed(self, request, instance=None):
        """Show the action only for active/paused instances the user may act on."""
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        self.paused = instance.status == "PAUSED"
        # Renamed from ``policy`` so the local does not shadow the imported
        # ``openstack_dashboard.policy`` module used elsewhere in this file.
        if self.paused:
            self.current_present_action = UNPAUSE
            policy_rules = (
                ("compute", "compute_extension:admin_actions:unpause"),)
        else:
            self.current_present_action = PAUSE
            policy_rules = (
                ("compute", "compute_extension:admin_actions:pause"),)

        has_permission = True
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        if policy_check:
            has_permission = policy_check(
                policy_rules, request,
                target={'project_id': getattr(instance, 'tenant_id', None)})

        return (has_permission
                and (instance.status in ACTIVE_STATES or self.paused)
                and not is_deleting(instance))

    def action(self, request, obj_id):
        # Direction was decided (and cached) by the preceding allowed() call.
        if self.paused:
            api.nova.server_unpause(request, obj_id)
            self.current_past_action = UNPAUSE
        else:
            api.nova.server_pause(request, obj_id)
            self.current_past_action = PAUSE
class ToggleSuspend(tables.BatchAction):
    """Batch action that suspends running instances or resumes suspended ones.

    Mirrors TogglePause: the label and the policy rule checked depend on
    the instance's current SUSPENDED state, cached on ``self.suspended``.
    """
    name = "suspend"
    classes = ("btn-suspend",)

    @staticmethod
    def action_present(count):
        return (
            ungettext_lazy(
                u"Suspend Instance",
                u"Suspend Instances",
                count
            ),
            ungettext_lazy(
                u"Resume Instance",
                u"Resume Instances",
                count
            ),
        )

    @staticmethod
    def action_past(count):
        return (
            ungettext_lazy(
                u"Suspended Instance",
                u"Suspended Instances",
                count
            ),
            ungettext_lazy(
                u"Resumed Instance",
                u"Resumed Instances",
                count
            ),
        )

    def allowed(self, request, instance=None):
        """Show the action only for active/suspended instances the user may act on."""
        if not api.nova.extension_supported('AdminActions',
                                            request):
            return False
        if not instance:
            return False
        self.suspended = instance.status == "SUSPENDED"
        # Renamed from ``policy`` so the local does not shadow the imported
        # ``openstack_dashboard.policy`` module used elsewhere in this file.
        if self.suspended:
            self.current_present_action = RESUME
            policy_rules = (
                ("compute", "compute_extension:admin_actions:resume"),)
        else:
            self.current_present_action = SUSPEND
            policy_rules = (
                ("compute", "compute_extension:admin_actions:suspend"),)

        has_permission = True
        policy_check = getattr(settings, "POLICY_CHECK_FUNCTION", None)
        if policy_check:
            has_permission = policy_check(
                policy_rules, request,
                target={'project_id': getattr(instance, 'tenant_id', None)})

        return (has_permission
                and (instance.status in ACTIVE_STATES or self.suspended)
                and not is_deleting(instance))

    def action(self, request, obj_id):
        # Direction was decided (and cached) by the preceding allowed() call.
        if self.suspended:
            api.nova.server_resume(request, obj_id)
            self.current_past_action = RESUME
        else:
            api.nova.server_suspend(request, obj_id)
            self.current_past_action = SUSPEND
class LaunchLink(tables.LinkAction):
    """Table action linking to the Launch Instance workflow.

    ``allowed`` disables the button (with a "(Quota exceeded)" suffix)
    when the project's instance, core or RAM quota is exhausted.
    """
    name = "launch"
    verbose_name = _("Launch Instance")
    url = "horizon:project:instances:launch"
    classes = ("ajax-modal", "btn-launch")
    icon = "cloud-upload"
    policy_rules = (("compute", "compute:create"),)
    ajax = True

    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        super(LaunchLink, self).__init__(attrs, **kwargs)

    def allowed(self, request, datum):
        try:
            limits = api.nova.tenant_absolute_limits(request, reserved=True)

            def _quota_exceeded(max_key, used_key):
                # Nova reports -1 for "unlimited"; previously such quotas
                # produced a negative "available" count and incorrectly
                # disabled the Launch button.
                maximum = limits[max_key]
                return maximum >= 0 and maximum - limits[used_key] <= 0

            if (_quota_exceeded('maxTotalInstances', 'totalInstancesUsed')
                    or _quota_exceeded('maxTotalCores', 'totalCoresUsed')
                    or _quota_exceeded('maxTotalRAMSize', 'totalRAMUsed')):
                if "disabled" not in self.classes:
                    self.classes = [c for c in self.classes] + ['disabled']
                    self.verbose_name = string_concat(self.verbose_name, ' ',
                                                      _("(Quota exceeded)"))
            else:
                self.verbose_name = _("Launch Instance")
                classes = [c for c in self.classes if c != "disabled"]
                self.classes = classes
        except Exception:
            LOG.exception("Failed to retrieve quota information")
            # If we can't get the quota information, leave it to the
            # API to check when launching
        return True  # The action should always be displayed

    def single(self, table, request, object_id=None):
        # AJAX entry point: refresh the enabled/disabled state and re-render.
        self.allowed(request, None)
        return HttpResponse(self.render())
class LaunchLinkNG(LaunchLink):
    """Angular-based variant of LaunchLink that opens the NG launch wizard."""
    name = "launch-ng"
    verbose_name = _("Launch Instance NG")
    ajax = False
    # Trailing comma: this must be an iterable of CSS class names, not a
    # bare string (iterating a string yields characters in allowed()).
    classes = ("btn-launch",)

    def __init__(self, attrs=None, **kwargs):
        kwargs['preempt'] = True
        # Build the default dict per-instance rather than using a mutable
        # default argument, which would be shared by every instance.
        if attrs is None:
            attrs = {
                "ng-controller": "LaunchInstanceModalCtrl",
                "ng-click": "openLaunchInstanceWizard()"
            }
        super(LaunchLink, self).__init__(attrs, **kwargs)

    def get_link_url(self, datum=None):
        # The wizard is opened client-side via the ng-click handler.
        return "javascript:void(0);"
class EditInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the update-instance workflow on the info step."""
    name = "edit"
    verbose_name = _("Edit Instance")
    url = "horizon:project:instances:update"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("compute", "compute:update"),)

    def get_link_url(self, project):
        return self._get_link_url(project, 'instance_info')

    def _get_link_url(self, project, step_slug):
        # Build the workflow URL with the target step and a redirect back
        # to the current table page.
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  update_instance.UpdateInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])

    def allowed(self, request, instance):
        return not is_deleting(instance)
class EditInstanceSecurityGroups(EditInstance):
    """Edit action that opens the update workflow on the security-groups step."""
    name = "edit_secgroups"
    verbose_name = _("Edit Security Groups")

    def get_link_url(self, project):
        return self._get_link_url(project, 'update_security_groups')

    def allowed(self, request, instance=None):
        # Only for the owner's active, non-deleting instances.
        if is_deleting(instance):
            return False
        if instance.status not in ACTIVE_STATES:
            return False
        return request.user.tenant_id == instance.tenant_id
class CreateSnapshot(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the create-snapshot modal."""
    name = "snapshot"
    verbose_name = _("Create Snapshot")
    url = "horizon:project:images:snapshots:create"
    classes = ("ajax-modal",)
    icon = "camera"
    policy_rules = (("compute", "compute:snapshot"),)

    def allowed(self, request, instance=None):
        # Snapshots are possible while active, shut off, paused or suspended.
        return instance.status in SNAPSHOT_READY_STATES \
            and not is_deleting(instance)
class ConsoleLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the detail page's console tab."""
    name = "console"
    verbose_name = _("Console")
    url = "horizon:project:instances:detail"
    classes = ("btn-console",)
    policy_rules = (("compute", "compute_extension:consoles"),)

    def allowed(self, request, instance=None):
        # We check if ConsoleLink is allowed only if settings.CONSOLE_TYPE is
        # not set at all, or if it's set to any value other than None or False.
        return bool(getattr(settings, 'CONSOLE_TYPE', True)) and \
            instance.status in ACTIVE_STATES and not is_deleting(instance)

    def get_link_url(self, datum):
        # Append the console tab's query string to the detail URL.
        base_url = super(ConsoleLink, self).get_link_url(datum)
        tab_query_string = tabs.ConsoleTab(
            tabs.InstanceDetailTabs).get_query_string()
        return "?".join([base_url, tab_query_string])
class LogLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action linking to the detail page's console-log tab."""
    name = "log"
    verbose_name = _("View Log")
    url = "horizon:project:instances:detail"
    classes = ("btn-log",)
    policy_rules = (("compute", "compute_extension:console_output"),)

    def allowed(self, request, instance=None):
        # Logs are only offered for active, non-deleting instances.
        return instance.status in ACTIVE_STATES and not is_deleting(instance)

    def get_link_url(self, datum):
        detail_url = super(LogLink, self).get_link_url(datum)
        log_tab_query = tabs.LogTab(tabs.InstanceDetailTabs).get_query_string()
        return "?".join([detail_url, log_tab_query])
class ResizeLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the resize workflow on the flavor-choice step."""
    name = "resize"
    verbose_name = _("Resize Instance")
    url = "horizon:project:instances:resize"
    classes = ("ajax-modal", "btn-resize")
    policy_rules = (("compute", "compute:resize"),)

    def get_link_url(self, project):
        return self._get_link_url(project, 'flavor_choice')

    def _get_link_url(self, project, step_slug):
        # Build the workflow URL with the target step and a redirect back
        # to the current table page.
        base_url = urlresolvers.reverse(self.url, args=[project.id])
        next_url = self.table.get_full_url()
        params = {"step": step_slug,
                  resize_instance.ResizeInstance.redirect_param_name: next_url}
        param = urlencode(params)
        return "?".join([base_url, param])

    def allowed(self, request, instance):
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF')
                and not is_deleting(instance))
class ConfirmResize(policy.PolicyTargetMixin, tables.Action):
    """Confirm a pending resize/migration for one instance."""
    name = "confirm"
    verbose_name = _("Confirm Resize/Migrate")
    classes = ("btn-confirm", "btn-action-required")
    policy_rules = (("compute", "compute:confirm_resize"),)

    def allowed(self, request, instance):
        # Only while nova is waiting for user confirmation.
        return instance.status == 'VERIFY_RESIZE'

    def single(self, table, request, instance):
        api.nova.server_confirm_resize(request, instance)
class RevertResize(policy.PolicyTargetMixin, tables.Action):
    """Revert a pending resize/migration for one instance."""
    name = "revert"
    verbose_name = _("Revert Resize/Migrate")
    classes = ("btn-revert", "btn-action-required")
    policy_rules = (("compute", "compute:revert_resize"),)

    def allowed(self, request, instance):
        # Only while nova is waiting for user confirmation.
        return instance.status == 'VERIFY_RESIZE'

    def single(self, table, request, instance):
        api.nova.server_revert_resize(request, instance)
class RebuildInstance(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the rebuild-instance modal."""
    name = "rebuild"
    verbose_name = _("Rebuild Instance")
    classes = ("btn-rebuild", "ajax-modal")
    url = "horizon:project:instances:rebuild"
    policy_rules = (("compute", "compute:rebuild"),)

    def allowed(self, request, instance):
        return ((instance.status in ACTIVE_STATES
                 or instance.status == 'SHUTOFF')
                and not is_deleting(instance))

    def get_link_url(self, datum):
        instance_id = self.table.get_object_id(datum)
        return urlresolvers.reverse(self.url, args=[instance_id])
class DecryptInstancePassword(tables.LinkAction):
    """Row action to retrieve/decrypt an instance's admin password.

    Only shown when OPENSTACK_ENABLE_PASSWORD_RETRIEVE is enabled and the
    instance was booted with a keypair.
    """
    name = "decryptpassword"
    verbose_name = _("Retrieve Password")
    classes = ("btn-decrypt", "ajax-modal")
    url = "horizon:project:instances:decryptpassword"

    def allowed(self, request, instance):
        enable = getattr(settings,
                         'OPENSTACK_ENABLE_PASSWORD_RETRIEVE',
                         False)
        return (enable
                and (instance.status in ACTIVE_STATES
                     or instance.status == 'SHUTOFF')
                and not is_deleting(instance)
                and get_keyname(instance) is not None)

    def get_link_url(self, datum):
        # The keypair name is needed to decrypt the password client-side.
        instance_id = self.table.get_object_id(datum)
        keypair_name = get_keyname(datum)
        return urlresolvers.reverse(self.url, args=[instance_id,
                                                    keypair_name])
class AssociateIP(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the floating-IP association workflow.

    Hidden when floating IPs are unsupported or when the simplified
    one-click association is available (SimpleAssociateIP covers that).
    """
    name = "associate"
    verbose_name = _("Associate Floating IP")
    url = "horizon:project:access_and_security:floating_ips:associate"
    classes = ("ajax-modal",)
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)

    def allowed(self, request, instance):
        if not api.network.floating_ip_supported(request):
            return False
        if api.network.floating_ip_simple_associate_supported(request):
            return False
        return not is_deleting(instance)

    def get_link_url(self, datum):
        # Pre-select the instance in the workflow and redirect back here.
        base_url = urlresolvers.reverse(self.url)
        next_url = self.table.get_full_url()
        params = {
            "instance_id": self.table.get_object_id(datum),
            workflows.IPAssociationWorkflow.redirect_param_name: next_url}
        params = urlencode(params)
        return "?".join([base_url, params])
class SimpleAssociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action: allocate a floating IP and attach it to the instance."""
    name = "associate-simple"
    verbose_name = _("Associate Floating IP")
    icon = "link"
    policy_rules = (("compute", "network:associate_floating_ip"),)

    def allowed(self, request, instance):
        if not api.network.floating_ip_simple_associate_supported(request):
            return False
        return not is_deleting(instance)

    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            target_id = api.network.floating_ip_target_get_by_instance(
                request, instance_id).split('_')[0]

            fip = api.network.tenant_floating_ip_allocate(request)
            api.network.floating_ip_associate(request, fip.id, target_id)
            messages.success(request,
                             _("Successfully associated floating IP: %s")
                             % fip.ip)
        except Exception:
            exceptions.handle(request,
                              _("Unable to associate floating IP."))
        return shortcuts.redirect(request.get_full_path())
class SimpleDisassociateIP(policy.PolicyTargetMixin, tables.Action):
    """One-click action: detach one floating IP from the instance."""
    name = "disassociate"
    verbose_name = _("Disassociate Floating IP")
    classes = ("btn-danger", "btn-disassociate",)
    policy_rules = (("compute", "network:disassociate_floating_ip"),)

    def allowed(self, request, instance):
        if not api.network.floating_ip_supported(request):
            return False
        if not conf.HORIZON_CONFIG["simple_ip_management"]:
            return False
        return not is_deleting(instance)

    def single(self, table, request, instance_id):
        try:
            # target_id is port_id for Neutron and instance_id for Nova Network
            # (Neutron API wrapper returns a 'portid_fixedip' string)
            targets = api.network.floating_ip_target_list_by_instance(
                request, instance_id)

            target_ids = [t.split('_')[0] for t in targets]

            fips = [fip for fip in api.network.tenant_floating_ip_list(request)
                    if fip.port_id in target_ids]
            # Removing multiple floating IPs at once doesn't work, so this pops
            # off the first one.
            if fips:
                fip = fips.pop()
                api.network.floating_ip_disassociate(request, fip.id)
                messages.success(request,
                                 _("Successfully disassociated "
                                   "floating IP: %s") % fip.ip)
            else:
                messages.info(request, _("No floating IPs to disassociate."))
        except Exception:
            exceptions.handle(request,
                              _("Unable to disassociate floating IP."))
        return shortcuts.redirect(request.get_full_path())
def instance_fault_to_friendly_message(instance):
    """Translate a raw nova fault message into a user-friendly one.

    Known messages get a specific translation; anything else becomes a
    generic "try again later" message that includes the raw text.
    """
    raw_message = getattr(instance, 'fault', {}).get('message', _("Unknown"))
    friendly = {
        'NoValidHost': _("There is not enough capacity for this "
                         "flavor in the selected availability zone. "
                         "Try again later or select a different availability "
                         "zone.")
    }
    fallback = _("Please try again later [Error: %s].") % raw_message
    return friendly.get(raw_message, fallback)
def get_instance_error(instance):
    """Return a user-facing error message for an instance in ERROR state.

    Returns None when the instance is not in error.
    """
    if instance.status.lower() != 'error':
        return None
    message = instance_fault_to_friendly_message(instance)
    # Parenthesize the fallback: ``%`` binds tighter than ``or``, so the
    # previous ``... % instance.name or instance.id`` never substituted the
    # id when the name was empty.
    preamble = _('Failed to perform requested operation on instance "%s", the '
                 'instance has an error status') % (instance.name or
                                                    instance.id)
    message = string_concat(preamble, ': ', message)
    return message
class UpdateRow(tables.Row):
    """AJAX row updater: refetches one instance (plus its flavor) by id."""
    ajax = True

    def get_data(self, request, instance_id):
        instance = api.nova.server_get(request, instance_id)
        try:
            # Attach the full flavor object so size columns can render it.
            instance.full_flavor = api.nova.flavor_get(request,
                                                       instance.flavor["id"])
        except Exception:
            # Missing flavor info is non-fatal; the row renders without it.
            exceptions.handle(request,
                              _('Unable to retrieve flavor information '
                                'for instance "%s".') % instance_id,
                              ignore=True)
        error = get_instance_error(instance)
        if error:
            messages.error(request, error)
        return instance
class StartInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that starts stopped/crashed instances."""
    name = "start"
    classes = ('btn-confirm',)
    policy_rules = (("compute", "compute:start"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Start Instance",
            u"Start Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Started Instance",
            u"Started Instances",
            count
        )

    def allowed(self, request, instance):
        # Table-level check (instance is None) is always allowed.
        return ((instance is None) or
                (instance.status in ("SHUTDOWN", "SHUTOFF", "CRASHED")))

    def action(self, request, obj_id):
        api.nova.server_start(request, obj_id)
class StopInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that powers off running/suspended instances."""
    name = "stop"
    classes = ('btn-danger',)
    policy_rules = (("compute", "compute:stop"),)
    help_text = _("To power off a specific instance.")

    @staticmethod
    def action_present(count):
        return npgettext_lazy(
            "Action to perform (the instance is currently running)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return npgettext_lazy(
            "Past action (the instance is currently already Shut Off)",
            u"Shut Off Instance",
            u"Shut Off Instances",
            count
        )

    def allowed(self, request, instance):
        # Keyed on the hypervisor power state rather than the API status.
        return ((instance is None)
                or ((get_power_state(instance) in ("RUNNING", "SUSPENDED"))
                    and not is_deleting(instance)))

    def action(self, request, obj_id):
        api.nova.server_stop(request, obj_id)
class LockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that locks instances against further changes."""
    name = "lock"
    policy_rules = (("compute", "compute_extension:admin_actions:lock"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Lock Instance",
            u"Lock Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Locked Instance",
            u"Locked Instances",
            count
        )

    # TODO(akrivoka): When the lock status is added to nova, revisit this
    # to only allow unlocked instances to be locked
    def allowed(self, request, instance):
        if not api.nova.extension_supported('AdminActions', request):
            return False
        return True

    def action(self, request, obj_id):
        api.nova.server_lock(request, obj_id)
class UnlockInstance(policy.PolicyTargetMixin, tables.BatchAction):
    """Batch action that unlocks previously locked instances."""
    name = "unlock"
    policy_rules = (("compute", "compute_extension:admin_actions:unlock"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Unlock Instance",
            u"Unlock Instances",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Unlocked Instance",
            u"Unlocked Instances",
            count
        )

    # TODO(akrivoka): When the lock status is added to nova, revisit this
    # to only allow locked instances to be unlocked
    def allowed(self, request, instance):
        if not api.nova.extension_supported('AdminActions', request):
            return False
        return True

    def action(self, request, obj_id):
        api.nova.server_unlock(request, obj_id)
def get_ips(instance):
    """Render the instance's addresses grouped by network.

    Each network's addresses are split into floating and non-floating
    lists and passed to the _instance_ips.html template.
    """
    template_name = 'project/instances/_instance_ips.html'
    ip_groups = {}

    # ``dict.items()`` instead of the Python-2-only ``iteritems()``:
    # behavior is identical on Python 2 and it also works on Python 3.
    for ip_group, addresses in instance.addresses.items():
        ip_groups[ip_group] = {}
        ip_groups[ip_group]["floating"] = []
        ip_groups[ip_group]["non_floating"] = []
        for address in addresses:
            if ('OS-EXT-IPS:type' in address and
               address['OS-EXT-IPS:type'] == "floating"):
                ip_groups[ip_group]["floating"].append(address)
            else:
                ip_groups[ip_group]["non_floating"].append(address)

    context = {
        "ip_groups": ip_groups,
    }
    return template.loader.render_to_string(template_name, context)
def get_size(instance):
    """Render the flavor (size) cell for *instance*, or "Not available"."""
    if not hasattr(instance, "full_flavor"):
        return _("Not available")
    flavor = instance.full_flavor
    size_ram = sizeformat.mb_float_format(flavor.ram)
    if flavor.disk > 0:
        size_disk = sizeformat.diskgbformat(flavor.disk)
    else:
        size_disk = _("%s GB") % "0"
    context = {
        "name": flavor.name,
        "id": instance.id,
        "size_disk": size_disk,
        "size_ram": size_ram,
        "vcpus": flavor.vcpus,
        "flavor_id": flavor.id
    }
    return template.loader.render_to_string(
        'project/instances/_instance_flavor.html', context)
def get_keyname(instance):
    """Return the instance's key pair name, or "Not available"."""
    if not hasattr(instance, "key_name"):
        return _("Not available")
    return instance.key_name
def get_power_state(instance):
    # Translate the raw "OS-EXT-STS:power_state" attribute through the
    # module-level POWER_STATES mapping (defined above, outside this view);
    # missing attribute defaults to 0, unknown states to the empty string.
    return POWER_STATES.get(getattr(instance, "OS-EXT-STS:power_state", 0), '')
# Maps a server's raw ``status`` value to its translated display label;
# used as ``display_choices`` on the status column. Fix: the original
# listed ("deleted", ...) twice — the second entry was dead weight.
STATUS_DISPLAY_CHOICES = (
    ("deleted", pgettext_lazy("Current status of an Instance", u"Deleted")),
    ("active", pgettext_lazy("Current status of an Instance", u"Active")),
    ("shutoff", pgettext_lazy("Current status of an Instance", u"Shutoff")),
    ("suspended", pgettext_lazy("Current status of an Instance",
                                u"Suspended")),
    ("paused", pgettext_lazy("Current status of an Instance", u"Paused")),
    ("error", pgettext_lazy("Current status of an Instance", u"Error")),
    ("resize", pgettext_lazy("Current status of an Instance",
                             u"Resize/Migrate")),
    ("verify_resize", pgettext_lazy("Current status of an Instance",
                                    u"Confirm or Revert Resize/Migrate")),
    ("revert_resize", pgettext_lazy(
        "Current status of an Instance", u"Revert Resize/Migrate")),
    ("reboot", pgettext_lazy("Current status of an Instance", u"Reboot")),
    ("hard_reboot", pgettext_lazy("Current status of an Instance",
                                  u"Hard Reboot")),
    ("password", pgettext_lazy("Current status of an Instance", u"Password")),
    ("rebuild", pgettext_lazy("Current status of an Instance", u"Rebuild")),
    ("migrating", pgettext_lazy("Current status of an Instance",
                                u"Migrating")),
    ("build", pgettext_lazy("Current status of an Instance", u"Build")),
    ("rescue", pgettext_lazy("Current status of an Instance", u"Rescue")),
    ("soft_deleted", pgettext_lazy("Current status of an Instance",
                                   u"Soft Deleted")),
    ("shelved", pgettext_lazy("Current status of an Instance", u"Shelved")),
    ("shelved_offloaded", pgettext_lazy("Current status of an Instance",
                                        u"Shelved Offloaded")),
)
TASK_DISPLAY_NONE = pgettext_lazy("Task status of an Instance", u"None")
# Mapping of task states taken from Nova's nova/compute/task_states.py
TASK_DISPLAY_CHOICES = (
("scheduling", pgettext_lazy("Task status of an Instance",
u"Scheduling")),
("block_device_mapping", pgettext_lazy("Task status of an Instance",
u"Block Device Mapping")),
("networking", pgettext_lazy("Task status of an Instance",
u"Networking")),
("spawning", pgettext_lazy("Task status of an Instance", u"Spawning")),
("image_snapshot", pgettext_lazy("Task status of an Instance",
u"Snapshotting")),
("image_snapshot_pending", pgettext_lazy("Task status of an Instance",
u"Image Snapshot Pending")),
("image_pending_upload", pgettext_lazy("Task status of an Instance",
u"Image Pending Upload")),
("image_uploading", pgettext_lazy("Task status of an Instance",
u"Image Uploading")),
("image_backup", pgettext_lazy("Task status of an Instance",
u"Image Backup")),
("updating_password", pgettext_lazy("Task status of an Instance",
u"Updating Password")),
("resize_prep", pgettext_lazy("Task status of an Instance",
u"Preparing Resize or Migrate")),
("resize_migrating", pgettext_lazy("Task status of an Instance",
u"Resizing or Migrating")),
("resize_migrated", pgettext_lazy("Task status of an Instance",
u"Resized or Migrated")),
("resize_finish", pgettext_lazy("Task status of an Instance",
u"Finishing Resize or Migrate")),
("resize_reverting", pgettext_lazy("Task status of an Instance",
u"Reverting Resize or Migrate")),
("resize_confirming", pgettext_lazy("Task status of an Instance",
u"Confirming Resize or Migrate")),
("rebooting", pgettext_lazy("Task status of an Instance", u"Rebooting")),
("reboot_pending", pgettext_lazy("Task status of an Instance",
u"Reboot Pending")),
("reboot_started", pgettext_lazy("Task status of an Instance",
u"Reboot Started")),
("rebooting_hard", pgettext_lazy("Task status of an Instance",
u"Rebooting Hard")),
("reboot_pending_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Pending Hard")),
("reboot_started_hard", pgettext_lazy("Task status of an Instance",
u"Reboot Started Hard")),
("pausing", pgettext_lazy("Task status of an Instance", u"Pausing")),
("unpausing", pgettext_lazy("Task status of an Instance", u"Resuming")),
("suspending", pgettext_lazy("Task status of an Instance",
u"Suspending")),
("resuming", pgettext_lazy("Task status of an Instance", u"Resuming")),
("powering-off", pgettext_lazy("Task status of an Instance",
u"Powering Off")),
("powering-on", pgettext_lazy("Task status of an Instance",
u"Powering On")),
("rescuing", pgettext_lazy("Task status of an Instance", u"Rescuing")),
("unrescuing", pgettext_lazy("Task status of an Instance",
u"Unrescuing")),
("rebuilding", pgettext_lazy("Task status of an Instance",
u"Rebuilding")),
("rebuild_block_device_mapping", pgettext_lazy(
"Task status of an Instance", u"Rebuild Block Device Mapping")),
("rebuild_spawning", pgettext_lazy("Task status of an Instance",
u"Rebuild Spawning")),
("migrating", pgettext_lazy("Task status of an Instance", u"Migrating")),
("deleting", pgettext_lazy("Task status of an Instance", u"Deleting")),
("soft-deleting", pgettext_lazy("Task status of an Instance",
u"Soft Deleting")),
("restoring", pgettext_lazy("Task status of an Instance", u"Restoring")),
("shelving", pgettext_lazy("Task status of an Instance", u"Shelving")),
("shelving_image_pending_upload", pgettext_lazy(
"Task status of an Instance", u"Shelving Image Pending Upload")),
("shelving_image_uploading", pgettext_lazy("Task status of an Instance",
u"Shelving Image Uploading")),
("shelving_offloading", pgettext_lazy("Task status of an Instance",
u"Shelving Offloading")),
("unshelving", pgettext_lazy("Task status of an Instance",
u"Unshelving")),
)
POWER_DISPLAY_CHOICES = (
("NO STATE", pgettext_lazy("Power state of an Instance", u"No State")),
("RUNNING", pgettext_lazy("Power state of an Instance", u"Running")),
("BLOCKED", pgettext_lazy("Power state of an Instance", u"Blocked")),
("PAUSED", pgettext_lazy("Power state of an Instance", u"Paused")),
("SHUTDOWN", pgettext_lazy("Power state of an Instance", u"Shut Down")),
("SHUTOFF", pgettext_lazy("Power state of an Instance", u"Shut Off")),
("CRASHED", pgettext_lazy("Power state of an Instance", u"Crashed")),
("SUSPENDED", pgettext_lazy("Power state of an Instance", u"Suspended")),
("FAILED", pgettext_lazy("Power state of an Instance", u"Failed")),
("BUILDING", pgettext_lazy("Power state of an Instance", u"Building")),
)
class InstancesFilterAction(tables.FilterAction):
    # filter_type "server" — presumably delegates filtering to the API
    # rather than filtering already-fetched rows; confirm against the
    # horizon tables.FilterAction documentation.
    filter_type = "server"
    # (query field, displayed label, exact-match flag) triples offered in
    # the table's filter bar.
    filter_choices = (('name', _("Instance Name"), True),
                      ('status', _("Status ="), True),
                      ('image', _("Image ID ="), True),
                      ('flavor', _("Flavor ID ="), True))
class InstancesTable(tables.DataTable):
TASK_STATUS_CHOICES = (
(None, True),
("none", True)
)
STATUS_CHOICES = (
("active", True),
("shutoff", True),
("suspended", True),
("paused", True),
("error", False),
("rescue", True),
("shelved", True),
("shelved_offloaded", True),
)
name = tables.Column("name",
link="horizon:project:instances:detail",
verbose_name=_("Instance Name"))
image_name = tables.Column("image_name",
verbose_name=_("Image Name"))
ip = tables.Column(get_ips,
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
keypair = tables.Column(get_keyname, verbose_name=_("Key Pair"))
status = tables.Column("status",
filters=(title, filters.replace_underscores),
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
az = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
task = tables.Column("OS-EXT-STS:task_state",
verbose_name=_("Task"),
empty_value=TASK_DISPLAY_NONE,
status=True,
status_choices=TASK_STATUS_CHOICES,
display_choices=TASK_DISPLAY_CHOICES)
state = tables.Column(get_power_state,
filters=(title, filters.replace_underscores),
verbose_name=_("Power State"),
display_choices=POWER_DISPLAY_CHOICES)
created = tables.Column("created",
verbose_name=_("Time since created"),
filters=(filters.parse_isotime,
filters.timesince_sortable),
attrs={'data-type': 'timesince'})
class Meta(object):
name = "instances"
verbose_name = _("Instances")
status_columns = ["status", "task"]
row_class = UpdateRow
table_actions_menu = (StartInstance, StopInstance, SoftRebootInstance)
launch_actions = ()
if getattr(settings, 'LAUNCH_INSTANCE_LEGACY_ENABLED', True):
launch_actions = (LaunchLink,) + launch_actions
if getattr(settings, 'LAUNCH_INSTANCE_NG_ENABLED', False):
launch_actions = (LaunchLinkNG,) + launch_actions
table_actions = launch_actions + (TerminateInstance,
InstancesFilterAction)
row_actions = (StartInstance, ConfirmResize, RevertResize,
CreateSnapshot, SimpleAssociateIP, AssociateIP,
SimpleDisassociateIP, EditInstance,
DecryptInstancePassword, EditInstanceSecurityGroups,
ConsoleLink, LogLink, TogglePause, ToggleSuspend,
ResizeLink, LockInstance, UnlockInstance,
SoftRebootInstance, RebootInstance,
StopInstance, RebuildInstance, TerminateInstance)
| [
"[email protected]"
] | |
6234237d22d1e2a20cc1766714f825df98311bc5 | b0b21bd961031083ef2ff04e2c71648192e181bc | /snippets/popular_libraries/click-lib/intermediate-features/click_prompting.py | 821cd2007448e2464760e7ef0930ebb6d7949af7 | [
"Apache-2.0"
] | permissive | melvio/python3-examples | 3406b732b071fb8a23e296b9a2e5aab9e7a04deb | 5340fe17e0a5001a81cf195e63f825b77dc16fca | refs/heads/main | 2023-08-25T06:33:34.743019 | 2021-10-11T11:29:20 | 2021-10-11T11:29:20 | 396,391,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 104 | py | import click
user_input = click.prompt(text="Folder name", default="Download")
print(f"{user_input=}")
| [
"[email protected]"
] | |
83fa096992b60ee9f25862dd01b9c52b2c6c1ea5 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3_neat/16_0_3_JawBone_main.py | 9103e18ea816c4880314a942a1d0134a68bf0711 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,228 | py | def cpaf(rn):
for divisor in xrange(2, 100):
if not rn % divisor:
return (False, divisor)
return (True, 1)
def baseconverter(rn, basefrom):
    """Return non-negative integer *rn* written in base *basefrom* (2-10).

    Fix: the original used ``rn = rn / basefrom``, which is true (float)
    division on Python 3 and then crashes when the float remainder is
    used to index the digit string; ``divmod`` keeps integer arithmetic
    and behaves identically under Python 2.
    """
    digits = "0123456789"
    result = ""
    while True:
        rn, remains = divmod(rn, basefrom)
        result = digits[remains] + result
        if rn == 0:
            break
    return result
lines = raw_input()
for question_index in xrange(1, int(lines) + 1):
length_of_jamcoin, types_of_jamcoin = [int(s) for s in raw_input().split(" ")]
answer_list = []
count = 0
for index in xrange(1, pow(2, length_of_jamcoin)):
inside = baseconverter(index, 2)
if len(str(inside)) < length_of_jamcoin - 1:
result = str(inside).zfill(length_of_jamcoin - 2)
temp_testcase = '1' + result + '1'
answers = temp_testcase
for i in xrange(2, 11):
temp = cpaf(int(temp_testcase, i))
if not temp[0]:
answers += ' ' + str(temp[1])
if answers.count(' ') >= 9:
answer_list.append(answers)
if len(answer_list) >= types_of_jamcoin:
break
print 'Case #1:'
for ans in answer_list:
print ans | [
"[[email protected]]"
] | |
2ddd55605050c3b2aa54d92f04a82d1be4921a8f | 727f1bc2205c88577b419cf0036c029b8c6f7766 | /out-bin/py/google/fhir/models/run_locally.runfiles/com_google_fhir/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/layers.py | 5b4a0b637a1140402fc92bb72547a407526c443b | [
"Apache-2.0"
] | permissive | rasalt/fhir | 55cf78feed3596a3101b86f9e9bbf6652c6ed4ad | d49883cc4d4986e11ca66058d5a327691e6e048a | refs/heads/master | 2020-04-13T00:16:54.050913 | 2019-01-15T14:22:15 | 2019-01-15T14:22:15 | 160,260,223 | 0 | 0 | Apache-2.0 | 2018-12-03T22:07:01 | 2018-12-03T22:07:01 | null | UTF-8 | Python | false | false | 175 | py | /home/rkharwar/.cache/bazel/_bazel_rkharwar/c4bcd65252c8f8250f091ba96375f9a5/external/pypi__tensorflow_1_12_0/tensorflow-1.12.0.data/purelib/tensorflow/python/layers/layers.py | [
"[email protected]"
] | |
e1d28343bba645d8be668da7b073af3541987896 | 383d711b269aa42ec051a8300f9bad8cd3384de8 | /docker/models.py | 718aa7f04973c627897a573e40c8adb538b13cc7 | [] | no_license | Lupino/docker-server | 7af8dab451528704f470a19ae07fbd99afb47435 | 4a199e7e75dcf5ba5161a5373214bb03e8e2cf25 | refs/heads/master | 2021-01-10T19:30:42.888559 | 2014-04-01T07:23:22 | 2014-04-01T07:23:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,867 | py | from docker.conf import prefix
from lee import Model, query, Table, conf as lee_conf
from docker.logging import logger
class _Container(Model):
    # lee ORM model for a docker container row; the table name is
    # namespaced with the configured ``prefix``.
    table_name = '{}container'.format(prefix)
    columns = [
        {'name': 'container_id', 'type': 'str', 'primary': True, 'length': 32},
        {'name': 'image_id', 'type': 'str', 'length': 32},
        {'name': 'passwd', 'type': 'str', 'length': 32},
        {'name': 'ssh_port', 'type': 'int', 'unsigned': True, 'length': 5, 'default': 0},
        {'name': 'server_port', 'type': 'int', 'unsigned': True, 'length': 5, 'default': 0},
        {'name': 'created_at', 'type': 'int', 'unsigned': True, 'length': 10, 'default': 0},
        {'name': 'stop_at', 'type': 'int', 'unsigned': True, 'length': 10, 'default': 0},
    ]
# Table-level accessor used by the rest of the app.
Container = Table(_Container)
class _UserContainer(Model):
    # Join table linking users to the containers they own; composite
    # primary key (user_id, container_id), container_id also unique.
    table_name = '{}user_container'.format(prefix)
    columns = [
        {'name': 'user_id', 'type': 'int', 'length': 10, 'unsigned': True, 'primary': True},
        {'name': 'container_id', 'type': 'str', 'length': 32, 'primary': True, 'unique': True}
    ]
UserContainer = Table(_UserContainer)
class _User(Model):
    # Account table: auto-increment id, unique username and email.
    table_name = '{}user'.format(prefix)
    columns = [
        {'name': 'user_id', 'type': 'int', 'length': 10, 'unsigned': True, 'primary': True, 'auto_increment': True},
        {'name': 'username', 'type': 'str', 'length': 50, 'unique': True},
        {'name': 'passwd', 'type': 'str', 'length': 32},
        {'name': 'email', 'type': 'str', 'length': 100, 'unique': True}
    ]
User = Table(_User)
class Sequence(Model):
    """Named auto-increment counter stored in the ``sequence`` table."""
    table_name = 'sequence'
    columns = [
        {'name': 'name', 'type': 'str', 'primary': True, 'length': 20},
        {'name': 'id', 'type': 'int', 'default': 0}
    ]
    @query(autocommit=True)
    def next(self, name, cur):
        """Increment and return the counter for *name*.

        ``cur`` is a DB cursor injected by the ``@query`` decorator.
        Fixes: (1) the non-MySQL path returned the value read *before*
        the UPDATE ran, i.e. one less than the MySQL path's
        LAST_INSERT_ID() result — re-read the row after incrementing;
        (2) log with lazy %-style arguments so the message is only
        formatted when DEBUG logging is enabled.
        """
        name = '{}:{}'.format(prefix, name)
        last_id = 0
        if lee_conf.use_mysql:
            # Single atomic upsert; LAST_INSERT_ID(expr) exposes the new id.
            sql = 'INSERT INTO `sequence` (`name`) VALUES (?) ON DUPLICATE KEY UPDATE `id` = LAST_INSERT_ID(`id` + 1)'
            args = (name, )
            logger.debug('Query> SQL: %s | ARGS: %s', sql, args)
            cur.execute(sql, args)
            last_id = cur.lastrowid
        else:
            seq = self._table.find_by_id(name)
            if seq:
                sql = 'UPDATE `sequence` SET `id` = `id` + 1 WHERE `name` = ?'
                args = (name, )
                logger.debug('Query> SQL: %s | ARGS: %s', sql, args)
                cur.execute(sql, args)
                # Re-read so the incremented value is returned.
                seq = self._table.find_by_id(name)
            else:
                # First use of this name: create the row, then read it back.
                self._table.save({'name': name})
                seq = self._table.find_by_id(name)
            last_id = seq['id']
        return last_id
    def save(self, name, id):
        """Persist an explicit counter value for the prefix-namespaced *name*."""
        name = '{}:{}'.format(prefix, name)
        return self._table.save({'name': name, 'id': id})
seq = Table(Sequence)()
| [
"[email protected]"
] | |
454d744eedb4d7ef6400ff1daf55405c7d179bc0 | feb2ad26f596045ddccf8a36b514fb0460a37e01 | /expression_data/data/models.py | dcdcdcd4988deac32f133e4a6f8e228f877dc7bc | [
"BSD-2-Clause"
] | permissive | lamarck2008/expression-data-server | 9a06de7bd3f69cfe92dcf9d7400715e8096d2c1c | 7f70fd5d5a9569a315716c389f828b17a487fdbc | refs/heads/master | 2021-01-16T20:24:14.289633 | 2012-11-19T02:52:06 | 2012-11-19T02:52:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,797 | py | '''These models control the data saved into the database for a given experiment.
There is a generic base class named Data, which is then further subclassed into specific data models.
'''
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes import generic
from genes.models import Gene
class BaseData(models.Model):
'''This is the abstract base class for all data objects.
This model contains data for a given :class:`~experiments.models.mRNASeqExperiment` or :class:`~experiments.models.MicroArrayExperiment`.
The experiment is defined by a Generic ForeignKey to one of those two :class:`~experiments.models.Experiment` objects.
'''
#These fields control the foreignkey to the experiment.
experiment_type_choices = models.Q(app_label = 'experiments', model = 'mrnaseqexperiment') | models.Q(app_label = 'experiments', model = 'microarrayexperiment')
experiment_type = models.ForeignKey(ContentType, limit_choices_to = experiment_type_choices, help_text="Experiment Type")
experiment_id = models.PositiveIntegerField()
experiment = generic.GenericForeignKey('experiment_type', 'experiment_id')
gene = models.ForeignKey(Gene, help_text="The gene for these data.")
def __unicode__(self):
'''The unicode representation is the name.'''
return "%s" % self.gene
class Meta:
'''This is an abstract model.'''
abstract = True
class GeneExperimentData(BaseData):
'''These data are for gene-level data, aggregated per experiment.
These data can be used with :class:`~experiments.models.mRNASeqExperiment` or :class:`~experiments.models.MicroArrayExperiment` experiments.
This is an extension of the abstract base model :class:`data.models.BaseData`.
The fields in this model are based on the columns in the gene_exp.diff from cufflinks. See http://cufflinks.cbcb.umd.edu/manual.html#cuffdiff_output for more details.
The required fields are **gene**, **experiment**, **fold_change**, **p_value** and **q_value**.
'''
locus = models.CharField(max_length=20, blank=True, null=True, help_text="Chromosomal location of this gene.")
internal_id = models.CharField(max_length=20, blank=True, null=True, help_text="The probe id, or internal identification code for this gene.")
sample_1 = models.CharField(max_length=20, blank=True, null=True, help_text="The name of the first group in the comparason.")
sample_2 = models.CharField(max_length=20, blank=True, null=True, help_text="The name of the second group in the comparason.")
amount_1 = models.DecimalField(max_digits=15, decimal_places=6, blank=True, null=True, help_text="The amount in the first group.")
amount_2 = models.DecimalField(max_digits=15, decimal_places=6, blank=True, null=True, help_text="The amount in the second group.")
status = models.CharField(max_length=20, blank=True, null=True, help_text="The status code of the test.")
fold_change = models.FloatField(help_text="The log(2) fold change.")
test_statistic = models.FloatField(blank=True, null=True, help_text="The value of the test statistic used to compute significance.")
p_value = models.DecimalField(max_digits=9, decimal_places=8, help_text="Unadjusted p-value.")
q_value = models.DecimalField(max_digits=9, decimal_places=8, help_text="Multiple Comparason Adjusted p-value (Typically FDR)")
significant = models.CharField(max_length=3, blank=True, null=True, help_text="Is the q-value < 0.05?")
class Meta:
'''Updated the verbose name of the datum.'''
verbose_name_plural = 'Experiment Level Data for a Gene'
verbose_name = 'Experiment Level Datum for a Gene'
| [
"[email protected]"
] | |
f4b7ae8e9946c91cded7fe2092eda6da7b6a3cdf | 4090d8b4e8e9e28d620d222651c73a12a753be36 | /contextadv/migrations/0006_alter_contextadvertisementdescription_description.py | d762b37198cad99a6353794de8fe7074771fc939 | [] | no_license | isaev4lex/220studio | 91aa08f9d10ff55e98effe2542e26799efb6e2f2 | 6188403eeed7ee590b21da15c67af9e6f06ab06b | refs/heads/main | 2023-08-20T07:14:18.203593 | 2021-10-31T07:24:19 | 2021-10-31T07:24:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 522 | py | # Generated by Django 3.2.4 on 2021-08-05 12:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contextadv', '0005_metatags'),
]
operations = [
migrations.AlterField(
model_name='contextadvertisementdescription',
name='description',
field=models.TextField(verbose_name='Описание инструмента\n\n(для переноса строки использовать <br>)'),
),
]
| [
"[email protected]"
] | |
358be1517a6567c187fc0c758e6e8ce6b61d5ae6 | 0a1356b97465cc1d5c3f661f61b3b8c51fb05d46 | /android_binding/.buildozer/android/platform/build-armeabi-v7a/build/other_builds/hostpython3/desktop/hostpython3/Tools/msi/make_zip.py | 58f3b15ef8524e3b3487ec688380a8d5b9de0e2c | [
"GPL-1.0-or-later",
"Python-2.0",
"MIT"
] | permissive | Rohan-cod/cross_platform_calc | 00360f971e4da68dd36d6836c9ddbb157f6b77d5 | 5785a5e8150d174019b330c812e7eb012cc4dd79 | refs/heads/master | 2022-12-22T10:29:05.317051 | 2021-06-05T10:52:44 | 2021-06-05T10:52:44 | 237,465,912 | 2 | 1 | MIT | 2022-12-09T05:18:55 | 2020-01-31T16:07:31 | C | UTF-8 | Python | false | false | 7,729 | py | import argparse
import py_compile
import re
import sys
import shutil
import stat
import os
import tempfile
from itertools import chain
from pathlib import Path
from zipfile import ZipFile, ZIP_DEFLATED
TKTCL_RE = re.compile(r'^(_?tk|tcl).+\.(pyd|dll)', re.IGNORECASE)
DEBUG_RE = re.compile(r'_d\.(pyd|dll|exe|pdb|lib)$', re.IGNORECASE)
PYTHON_DLL_RE = re.compile(r'python\d\d?\.dll$', re.IGNORECASE)
DEBUG_FILES = {
'_ctypes_test',
'_testbuffer',
'_testcapi',
'_testconsole',
'_testimportmultiple',
'_testmultiphase',
'xxlimited',
'python3_dstub',
}
EXCLUDE_FROM_LIBRARY = {
'__pycache__',
'idlelib',
'pydoc_data',
'site-packages',
'tkinter',
'turtledemo',
}
EXCLUDE_FROM_EMBEDDABLE_LIBRARY = {
'ensurepip',
'venv',
}
EXCLUDE_FILE_FROM_LIBRARY = {
'bdist_wininst.py',
}
EXCLUDE_FILE_FROM_LIBS = {
'liblzma',
'python3stub',
}
EXCLUDED_FILES = {
'pyshellext',
}
def is_not_debug(p):
    """Return True unless *p* looks like a debug, Tcl/Tk, or excluded binary."""
    filename = p.name
    if DEBUG_RE.search(filename) or TKTCL_RE.search(filename):
        return False
    stem = p.stem.lower()
    return stem not in DEBUG_FILES and stem not in EXCLUDED_FILES
def is_not_debug_or_python(p):
    """True for non-debug files that are not the main python DLL itself."""
    return is_not_debug(p) and PYTHON_DLL_RE.search(p.name) is None
def include_in_lib(p):
    """Decide whether *p* belongs in the full distribution's Lib/ tree."""
    name = p.name.lower()
    if p.is_dir():
        # Prune excluded packages plus the test suites directly under
        # Lib/ (Lib/test) or one level below it (Lib/<pkg>/test[s]).
        if name in EXCLUDE_FROM_LIBRARY:
            return False
        if name == 'test' and p.parts[-2].lower() == 'lib':
            return False
        if name in {'test', 'tests'} and p.parts[-3].lower() == 'lib':
            return False
        return True
    # Files: drop the explicit exclusions and compiled/launcher artifacts.
    if name in EXCLUDE_FILE_FROM_LIBRARY:
        return False
    return p.suffix.lower() not in {'.pyc', '.pyo', '.exe'}
def include_in_embeddable_lib(p):
    """Like include_in_lib, but also drops dirs excluded from the embeddable distro."""
    excluded_dir = p.is_dir() and p.name.lower() in EXCLUDE_FROM_EMBEDDABLE_LIBRARY
    return False if excluded_dir else include_in_lib(p)
def include_in_libs(p):
    """Select import libraries for libs/, skipping debug and stub libs."""
    return is_not_debug(p) and p.stem.lower() not in EXCLUDE_FILE_FROM_LIBS
def include_in_tools(p):
    """Select Tools/ content: a few whole subdirectories plus .py/.pyw/.txt files."""
    wanted_dirs = {'scripts', 'i18n', 'pynche', 'demo', 'parser'}
    if p.is_dir() and p.name.lower() in wanted_dirs:
        return True
    return p.suffix.lower() in {'.py', '.pyw', '.txt'}
BASE_NAME = 'python{0.major}{0.minor}'.format(sys.version_info)
FULL_LAYOUT = [
('/', '$build', 'python.exe', is_not_debug),
('/', '$build', 'pythonw.exe', is_not_debug),
('/', '$build', 'python{}.dll'.format(sys.version_info.major), is_not_debug),
('/', '$build', '{}.dll'.format(BASE_NAME), is_not_debug),
('DLLs/', '$build', '*.pyd', is_not_debug),
('DLLs/', '$build', '*.dll', is_not_debug_or_python),
('include/', 'include', '*.h', None),
('include/', 'PC', 'pyconfig.h', None),
('Lib/', 'Lib', '**/*', include_in_lib),
('libs/', '$build', '*.lib', include_in_libs),
('Tools/', 'Tools', '**/*', include_in_tools),
]
EMBED_LAYOUT = [
('/', '$build', 'python*.exe', is_not_debug),
('/', '$build', '*.pyd', is_not_debug),
('/', '$build', '*.dll', is_not_debug),
('{}.zip'.format(BASE_NAME), 'Lib', '**/*', include_in_embeddable_lib),
]
if os.getenv('DOC_FILENAME'):
FULL_LAYOUT.append(('Doc/', 'Doc/build/htmlhelp', os.getenv('DOC_FILENAME'), None))
if os.getenv('VCREDIST_PATH'):
FULL_LAYOUT.append(('/', os.getenv('VCREDIST_PATH'), 'vcruntime*.dll', None))
EMBED_LAYOUT.append(('/', os.getenv('VCREDIST_PATH'), 'vcruntime*.dll', None))
def copy_to_layout(target, rel_sources):
    """Copy (source, relative-destination) pairs into *target*.

    If *target* ends in ``.zip`` a compressed archive is written, with
    ``.py`` sources byte-compiled to optimized ``.pyc`` where possible;
    otherwise files are copied into the directory tree. Returns the
    number of entries processed.
    """
    count = 0
    if target.suffix.lower() == '.zip':
        if target.exists():
            target.unlink()
        with ZipFile(str(target), 'w', ZIP_DEFLATED) as f:
            with tempfile.TemporaryDirectory() as tmpdir:
                for s, rel in rel_sources:
                    if rel.suffix.lower() == '.py':
                        # Ship optimized bytecode instead of source when the
                        # file compiles; fall back to the .py on a compile error.
                        pyc = Path(tmpdir) / rel.with_suffix('.pyc').name
                        try:
                            py_compile.compile(str(s), str(pyc), str(rel), doraise=True, optimize=2)
                        except py_compile.PyCompileError:
                            f.write(str(s), str(rel))
                        else:
                            f.write(str(pyc), str(rel.with_suffix('.pyc')))
                    else:
                        f.write(str(s), str(rel))
                    count += 1
    else:
        for s, rel in rel_sources:
            dest = target / rel
            try:
                dest.parent.mkdir(parents=True)
            except FileExistsError:
                pass
            # Clear the read-only bit (before and after) so a previous
            # copy can be overwritten and the result stays writable.
            if dest.is_file():
                dest.chmod(stat.S_IWRITE)
            shutil.copy(str(s), str(dest))
            if dest.is_file():
                dest.chmod(stat.S_IWRITE)
            count += 1
    return count
def rglob(root, pattern, condition):
    """Yield (path, path-relative-to-root) pairs for files under *root*.

    A pattern starting with ``**/`` (or ``**\\``) recurses breadth-first
    into matching directories; *condition*, when given, filters both the
    directories descended into and the files yielded.
    """
    recurse = pattern[:3] in {'**/', '**\\'}
    glob_pat = pattern[3:] if recurse else pattern
    pending = [root]
    while pending:
        current = pending.pop(0)
        for entry in current.glob(glob_pat):
            if condition is not None and not condition(entry):
                continue
            if recurse and entry.is_dir():
                pending.append(entry)
            elif entry.is_file():
                yield entry, entry.relative_to(root)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--source', metavar='dir', help='The directory containing the repository root', type=Path)
parser.add_argument('-o', '--out', metavar='file', help='The name of the output archive', type=Path, default=None)
parser.add_argument('-t', '--temp', metavar='dir', help='A directory to temporarily extract files into', type=Path, default=None)
parser.add_argument('-e', '--embed', help='Create an embedding layout', action='store_true', default=False)
parser.add_argument('-b', '--build', help='Specify the build directory', type=Path, default=None)
ns = parser.parse_args()
source = ns.source or (Path(__file__).resolve().parent.parent.parent)
out = ns.out
build = ns.build or Path(sys.exec_prefix)
assert isinstance(source, Path)
assert not out or isinstance(out, Path)
assert isinstance(build, Path)
if ns.temp:
temp = ns.temp
delete_temp = False
else:
temp = Path(tempfile.mkdtemp())
delete_temp = True
if out:
try:
out.parent.mkdir(parents=True)
except FileExistsError:
pass
try:
temp.mkdir(parents=True)
except FileExistsError:
pass
layout = EMBED_LAYOUT if ns.embed else FULL_LAYOUT
try:
for t, s, p, c in layout:
if s == '$build':
fs = build
else:
fs = source / s
files = rglob(fs, p, c)
extra_files = []
if s == 'Lib' and p == '**/*':
extra_files.append((
source / 'tools' / 'msi' / 'distutils.command.bdist_wininst.py',
Path('distutils') / 'command' / 'bdist_wininst.py'
))
copied = copy_to_layout(temp / t.rstrip('/'), chain(files, extra_files))
print('Copied {} files'.format(copied))
if ns.embed:
with open(str(temp / (BASE_NAME + '._pth')), 'w') as f:
print(BASE_NAME + '.zip', file=f)
print('.', file=f)
print('', file=f)
print('# Uncomment to run site.main() automatically', file=f)
print('#import site', file=f)
if out:
total = copy_to_layout(out, rglob(temp, '**/*', None))
print('Wrote {} files to {}'.format(total, out))
finally:
if delete_temp:
shutil.rmtree(temp, True)
if __name__ == "__main__":
sys.exit(int(main() or 0))
| [
"[email protected]"
] | |
253b6652ddac0a3ffbcf6e0fd96dfc8abecaf9b8 | a3bb97955ad28e8c83a23e4466bb5352ee86847d | /revision/apps/public/forms.py | 9b3b57cd9930137d58592f723e09c96bb6e411bb | [] | no_license | rosscdh/revision | 23ac75385cca5b44032ff2637eb635fa954bb2ec | 090fb2a82072c5570d89878c6f506dd22d5c5ed5 | refs/heads/master | 2016-09-05T10:53:33.652493 | 2014-11-29T10:57:41 | 2014-11-29T10:57:41 | 23,582,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,484 | py | # -*- coding: utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
from django.core.urlresolvers import reverse_lazy
from parsley.decorators import parsleyfy
from crispy_forms.helper import FormHelper, Layout
from crispy_forms.layout import ButtonHolder, Div, Field, Fieldset, HTML, Submit
from revision.utils import _get_unique_username
import logging
logger = logging.getLogger('django.request')
@parsleyfy
class SignUpForm(forms.Form):
username = forms.CharField(
required=False,
widget=forms.HiddenInput
)
first_name = forms.CharField(
error_messages={
'required': "First name can't be blank."
},
label='',
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'First name'})
)
last_name = forms.CharField(
error_messages={
'required': "Last name can't be blank."
},
label='',
max_length=30,
widget=forms.TextInput(attrs={'placeholder': 'Last name'})
)
email = forms.EmailField(
error_messages={
'invalid': "Email is invalid.",
'required': "Email can't be blank."
},
label='',
max_length=75,
widget=forms.EmailInput(attrs={'placeholder': 'Email address', 'autocomplete': 'off'})
)
password = forms.CharField(
error_messages={
'required': "Password can't be blank."
},
label='',
widget=forms.PasswordInput(attrs={'placeholder': 'Password'})
)
password_confirm = forms.CharField(
error_messages={
'required': "Confirm password can't be blank."
},
label='',
widget=forms.PasswordInput(attrs={'placeholder': 'Password again'})
)
t_and_c = forms.BooleanField(
error_messages={
'required': "You must agree to the Terms and Conditions."
},
initial=False,
label='I agree to the Terms and Conditions.',
required=True
)
def __init__(self, *args, **kwargs):
self.helper = FormHelper()
self.helper.attrs = {
'id': 'signup-form',
'parsley-validate': ''
}
self.helper.form_show_errors = False
self.helper.layout = Layout(
HTML('{% include "partials/form-errors.html" with form=form %}'),
Fieldset(
'',
Div(
Field('first_name', css_class=''),
Field('last_name', css_class=''),
css_class='form-name clearfix'
),
Field('email'),
Field('password'),
Field('password_confirm'),
Field('t_and_c', template='partials/t_and_c.html'),
),
ButtonHolder(
Submit('submit', 'Create Account')
)
)
super(SignUpForm, self).__init__(*args, **kwargs)
# Override the label with a link to the terms (can't go higher as the urls aren't loaded yet)
self.fields['t_and_c'].label = 'I agree to the <a href="%s" target="_blank">Terms and Conditions</a>.' % reverse_lazy('public:terms')
def clean_username(self):
final_username = self.data.get('email').split('@')[0]
final_username = _get_unique_username(username=final_username)
logger.info('Username %s available' % final_username)
return final_username
def clean_password_confirm(self):
password_confirm = self.cleaned_data.get('password_confirm')
password = self.cleaned_data.get('password')
if password != password_confirm:
raise forms.ValidationError("The two password fields didn't match.")
return password_confirm
def clean_email(self):
"""
Ensure the email is normalised
"""
email = User.objects.normalize_email(self.cleaned_data.get('email'))
user = User.objects.filter(email=email).first()
if user is None:
return email
else:
#
# NOTE! We cant be specific about the email in use as a message here as
# it could be used to determine if that email address exists (which it does
# and its prety clear but making the text a bit less specific may put them off)
#
raise forms.ValidationError("Sorry, but you cant use that email address.")
def save(self):
user = User.objects.create_user(self.cleaned_data.get('username'),
self.cleaned_data.get('email'),
self.cleaned_data.get('password'),
first_name=self.cleaned_data.get('first_name'),
last_name=self.cleaned_data.get('last_name'))
return user
@parsleyfy
class SignInForm(forms.Form):
    """Email/password sign-in form, validated client-side by Parsley."""

    # Labels are blank; placeholder text carries the field hints instead.
    email = forms.EmailField(
        error_messages={
            'required': "Email can't be blank."
        },
        label='',
        widget=forms.EmailInput(attrs={'placeholder': 'Email address'})
    )
    password = forms.CharField(
        error_messages={
            'required': "Password can't be blank."
        },
        label='',
        widget=forms.PasswordInput(attrs={'placeholder': 'Password'})
    )

    def __init__(self, *args, **kwargs):
        """Build the crispy-forms helper/layout for the sign-in form.

        Errors are rendered by the shared ``partials/form-errors.html``
        partial rather than per-field.
        """
        self.helper = FormHelper()
        self.helper.attrs = {
            'parsley-validate': '',
        }
        self.helper.form_show_errors = False
        self.helper.layout = Layout(
            HTML('{% include "partials/form-errors.html" with form=form %}'),
            Fieldset(
                '',
                Field('email', css_class='input-hg'),
                Field('password', css_class='input-hg'),
            ),
            ButtonHolder(
                Submit('submit', 'Secure Sign In', css_class='btn btn-primary btn-lg')
            )
        )
        super(SignInForm, self).__init__(*args, **kwargs)

    def clean(self):
        """Authenticate the submitted credentials; reject the form if they
        do not match a user.
        """
        user = None
        if 'email' in self.cleaned_data and 'password' in self.cleaned_data:
            # The project authenticates using the email address as username.
            user = authenticate(username=self.cleaned_data['email'], password=self.cleaned_data['password'])
        if user is None:
            raise forms.ValidationError("Sorry, no account with those credentials was found.")
        return super(SignInForm, self).clean()
| [
"[email protected]"
] | |
4e009c93c039eb04670636eb123f6a973e479fd8 | f9d564f1aa83eca45872dab7fbaa26dd48210d08 | /huaweicloud-sdk-dbss/huaweicloudsdkdbss/v1/model/batch_delete_resource_tag_request.py | 951851e88d7c6383d31b3e128954862b7a8c1840 | [
"Apache-2.0"
] | permissive | huaweicloud/huaweicloud-sdk-python-v3 | cde6d849ce5b1de05ac5ebfd6153f27803837d84 | f69344c1dadb79067746ddf9bfde4bddc18d5ecf | refs/heads/master | 2023-09-01T19:29:43.013318 | 2023-08-31T08:28:59 | 2023-08-31T08:28:59 | 262,207,814 | 103 | 44 | NOASSERTION | 2023-06-22T14:50:48 | 2020-05-08T02:28:43 | Python | UTF-8 | Python | false | false | 4,953 | py | # coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class BatchDeleteResourceTagRequest:
    """
    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values are masked with "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'resource_type': 'str',
        'resource_id': 'str',
        'body': 'ResourceTagRequest'
    }

    attribute_map = {
        'resource_type': 'resource_type',
        'resource_id': 'resource_id',
        'body': 'body'
    }

    def __init__(self, resource_type=None, resource_id=None, body=None):
        """BatchDeleteResourceTagRequest

        The model defined in huaweicloud sdk

        :param resource_type: Resource type. For audit: auditInstance
        :type resource_type: str
        :param resource_id: Resource ID
        :type resource_id: str
        :param body: Body of the BatchDeleteResourceTagRequest
        :type body: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
        """
        self._resource_type = None
        self._resource_id = None
        self._body = None
        self.discriminator = None

        self.resource_type = resource_type
        self.resource_id = resource_id
        # body is optional; assign through the property only when provided.
        if body is not None:
            self.body = body

    @property
    def resource_type(self):
        """Gets the resource_type of this BatchDeleteResourceTagRequest.

        Resource type. For audit: auditInstance

        :return: The resource_type of this BatchDeleteResourceTagRequest.
        :rtype: str
        """
        return self._resource_type

    @resource_type.setter
    def resource_type(self, resource_type):
        """Sets the resource_type of this BatchDeleteResourceTagRequest.

        Resource type. For audit: auditInstance

        :param resource_type: The resource_type of this BatchDeleteResourceTagRequest.
        :type resource_type: str
        """
        self._resource_type = resource_type

    @property
    def resource_id(self):
        """Gets the resource_id of this BatchDeleteResourceTagRequest.

        Resource ID

        :return: The resource_id of this BatchDeleteResourceTagRequest.
        :rtype: str
        """
        return self._resource_id

    @resource_id.setter
    def resource_id(self, resource_id):
        """Sets the resource_id of this BatchDeleteResourceTagRequest.

        Resource ID

        :param resource_id: The resource_id of this BatchDeleteResourceTagRequest.
        :type resource_id: str
        """
        self._resource_id = resource_id

    @property
    def body(self):
        """Gets the body of this BatchDeleteResourceTagRequest.

        :return: The body of this BatchDeleteResourceTagRequest.
        :rtype: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
        """
        return self._body

    @body.setter
    def body(self, body):
        """Sets the body of this BatchDeleteResourceTagRequest.

        :param body: The body of this BatchDeleteResourceTagRequest.
        :type body: :class:`huaweicloudsdkdbss.v1.ResourceTagRequest`
        """
        self._body = body

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Mask values of attributes flagged as sensitive.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            # Python 2 only: force a UTF-8 default encoding before dumping.
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, BatchDeleteResourceTagRequest):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"[email protected]"
] | |
90ce17400257d8e886aa3c49973efb6bbe7e3d0f | 8830831a87f35ff2628f379d8230928ec6b5641a | /BNPParibas/code/gbc_deviance.py | f947f44609ebf50d5d1c3aa5f5f6442aa072e2f5 | [] | no_license | nickmcadden/Kaggle | e5882c9d68a81700d8d969328d91c059a0643868 | cbc5347dec90e4bf64d4dbaf28b8ffb362efc64f | refs/heads/master | 2019-07-18T08:09:40.683168 | 2018-01-26T14:35:38 | 2018-01-26T14:35:38 | 40,735,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,263 | py | import sys
import pandas as pd
import numpy as np
import scipy as sp
import xgboost as xgb
import data
import argparse
import pickle as pkl
from scipy import stats
from collections import OrderedDict
from sklearn.utils import shuffle
from sklearn.cross_validation import StratifiedShuffleSplit, KFold
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.utils import shuffle
def log_loss(act, pred):
    """Vectorised computation of binary logloss.

    :param act: array-like of 0/1 ground-truth labels.
    :param pred: array-like of predicted probabilities for the positive class.
    :return: mean negative log-likelihood (a float).
    """
    epsilon = 1e-15
    # Clip predictions away from exactly 0/1 so the logs stay finite.
    # np.clip replaces sp.maximum/sp.minimum: the scipy.* NumPy-alias
    # functions (scipy.log, scipy.maximum, ...) were deprecated and have
    # been removed from modern SciPy releases.
    pred = np.clip(pred, epsilon, 1 - epsilon)
    ll = np.sum(act * np.log(pred) + (1 - act) * np.log(1 - pred))
    return -ll / len(act)
# Command-line configuration for the BNP Paribas run.
parser = argparse.ArgumentParser(description='XGBoost for BNP')
parser.add_argument('-f','--n_features', help='Number of features', type=int, default=1000)
parser.add_argument('-n','--n_rounds', help='Number of iterations', type=int, default=350)
parser.add_argument('-e','--eta', help='Learning rate', type=float, default=0.0125)
parser.add_argument('-r','--r_seed', help='Set random seed', type=int, default=3)
parser.add_argument('-b','--minbin', help='Minimum categorical bin size', type=int, default=1)
parser.add_argument('-ct','--cat_trans', help='Category transformation method', type=str, default='std')
parser.add_argument('-cv','--cv', action='store_true')
parser.add_argument('-codetest','--codetest', action='store_true')
parser.add_argument('-getcached', '--getcached', action='store_true')
parser.add_argument('-extra', '--extra', action='store_true')
m_params = vars(parser.parse_args())

# Load data
X, y, X_sub, ids = data.load(m_params)
print("BNP Parabas: classification...\n")
# Gradient boosting classifier; learning rate / rounds come from the CLI.
clf = GradientBoostingClassifier(loss='deviance', learning_rate=m_params['eta'], n_estimators=m_params['n_rounds'], subsample=1, max_features=35, min_samples_split=4, max_depth=12, min_samples_leaf=2, verbose=2, random_state=1)

if m_params['cv']:
    # do cross validation scoring
    kf = KFold(X.shape[0], n_folds=4, shuffle=True, random_state=1)
    scr = np.zeros([len(kf)])
    oob_pred = np.zeros(X.shape[0])
    sub_pred = np.zeros((X_sub.shape[0], 4))
    for i, (tr_ix, val_ix) in enumerate(kf):
        clf.fit(X[tr_ix], y[tr_ix])
        pred = clf.predict_proba(X[val_ix])
        # Out-of-fold predictions (for stacking); column i holds this fold's
        # predictions on the submission set.
        oob_pred[val_ix] = np.array(pred[:, 1])
        sub_pred[:, i] = clf.predict_proba(X_sub)[:, 1]
        scr[i] = log_loss(y[val_ix], np.array(pred[:, 1]))
        print('Train score is:', scr[i])
    print(log_loss(y, oob_pred))
    # Parenthesized print: the bare Python-2 print statement used here
    # previously is a syntax error under Python 3; this form works on both.
    print(oob_pred[1:10])
    sub_pred = sub_pred.mean(axis=1)
    oob_pred_filename = '../output/oob_pred_gbcdeviance_' + str(np.mean(scr))
    sub_pred_filename = '../output/sub_pred_gbcdeviance_' + str(np.mean(scr))
    pkl.dump(oob_pred, open(oob_pred_filename + '.p', 'wb'))
    pkl.dump(sub_pred, open(sub_pred_filename + '.p', 'wb'))
    preds = pd.DataFrame({"ID": ids, "PredictedProb": sub_pred})
    preds.to_csv(sub_pred_filename + '.csv', index=False)
else:
    X, y = shuffle(X, y)
    # Train on full data
    print("Training on full data")
    clf.fit(X, y)
    print("Creating predictions")
    pred = clf.predict_proba(X_sub)
    print("Saving Results.")
    model_name = '../output/pred_gbcdev_' + str(m_params['n_rounds'])
    preds = pd.DataFrame({"ID": ids, "PredictedProb": pred[:, 1]})
    preds.to_csv(model_name + '.csv', index=False)
| [
"[email protected]"
] | |
1d92b49f32e9e43e80212f55a3859782d21b2ef1 | b777fc0715c1d20faf0c849f6894cf41cfe90cbe | /tests/test_decompressor_fuzzing.py | a7f81adab4e4d6d70076e5aa4efea84caea7078d | [
"BSD-3-Clause"
] | permissive | pombredanne/python-zstandard | 31a417d3a7d5a05594346235fd88e89e9a01d698 | 70dcc805d7761b1d7e35fd219a4d5d4512acd96a | refs/heads/master | 2020-09-16T08:18:39.956344 | 2019-11-16T04:40:30 | 2019-11-16T04:41:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,091 | py | import io
import os
import unittest
try:
import hypothesis
import hypothesis.strategies as strategies
except ImportError:
raise unittest.SkipTest("hypothesis not available")
import zstandard as zstd
from .common import (
make_cffi,
NonClosingBytesIO,
random_input_data,
)
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
@make_cffi
class TestDecompressor_stream_reader_fuzzing(unittest.TestCase):
    """Hypothesis fuzz tests for ZstdDecompressor.stream_reader()."""

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_sizes=strategies.data(),
    )
    def test_stream_source_read_variance(
        self, original, level, streaming, source_read_size, read_sizes
    ):
        """read() with randomly varying request sizes round-trips the data."""
        cctx = zstd.ZstdCompressor(level=level)

        # Produce the frame either via the streaming writer or one-shot compress.
        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            source.seek(0)
        else:
            frame = cctx.compress(original)
            source = io.BytesIO(frame)

        dctx = zstd.ZstdDecompressor()

        chunks = []
        with dctx.stream_reader(source, read_size=source_read_size) as reader:
            while True:
                read_size = read_sizes.draw(strategies.integers(-1, 131072))
                chunk = reader.read(read_size)
                # read(0) legitimately returns b""; only stop on EOF.
                if not chunk and read_size:
                    break

                chunks.append(chunk)

        self.assertEqual(b"".join(chunks), original)

    # Similar to above except we have a constant read() size.
    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_size=strategies.integers(-1, 131072),
    )
    def test_stream_source_read_size(
        self, original, level, streaming, source_read_size, read_size
    ):
        """read() with one constant request size round-trips the data."""
        if read_size == 0:
            read_size = 1

        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            source.seek(0)
        else:
            frame = cctx.compress(original)
            source = io.BytesIO(frame)

        dctx = zstd.ZstdDecompressor()

        chunks = []
        reader = dctx.stream_reader(source, read_size=source_read_size)
        while True:
            chunk = reader.read(read_size)
            if not chunk and read_size:
                break

            chunks.append(chunk)

        self.assertEqual(b"".join(chunks), original)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_sizes=strategies.data(),
    )
    def test_buffer_source_read_variance(
        self, original, level, streaming, source_read_size, read_sizes
    ):
        """Like the stream-source variant, but reads from a bytes buffer."""
        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            frame = source.getvalue()
        else:
            frame = cctx.compress(original)

        dctx = zstd.ZstdDecompressor()
        chunks = []

        with dctx.stream_reader(frame, read_size=source_read_size) as reader:
            while True:
                read_size = read_sizes.draw(strategies.integers(-1, 131072))
                chunk = reader.read(read_size)
                if not chunk and read_size:
                    break

                chunks.append(chunk)

        self.assertEqual(b"".join(chunks), original)

    # Similar to above except we have a constant read() size.
    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_size=strategies.integers(-1, 131072),
    )
    def test_buffer_source_constant_read_size(
        self, original, level, streaming, source_read_size, read_size
    ):
        """Buffer source with one constant read() size round-trips the data."""
        if read_size == 0:
            read_size = -1

        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            frame = source.getvalue()
        else:
            frame = cctx.compress(original)

        dctx = zstd.ZstdDecompressor()
        chunks = []

        reader = dctx.stream_reader(frame, read_size=source_read_size)
        while True:
            chunk = reader.read(read_size)
            if not chunk and read_size:
                break

            chunks.append(chunk)

        self.assertEqual(b"".join(chunks), original)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
    )
    def test_stream_source_readall(self, original, level, streaming, source_read_size):
        """readall() returns the complete decompressed payload."""
        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            source.seek(0)
        else:
            frame = cctx.compress(original)
            source = io.BytesIO(frame)

        dctx = zstd.ZstdDecompressor()

        data = dctx.stream_reader(source, read_size=source_read_size).readall()
        self.assertEqual(data, original)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_sizes=strategies.data(),
    )
    def test_stream_source_read1_variance(
        self, original, level, streaming, source_read_size, read_sizes
    ):
        """read1() with randomly varying sizes round-trips the data."""
        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            source.seek(0)
        else:
            frame = cctx.compress(original)
            source = io.BytesIO(frame)

        dctx = zstd.ZstdDecompressor()

        chunks = []
        with dctx.stream_reader(source, read_size=source_read_size) as reader:
            while True:
                read_size = read_sizes.draw(strategies.integers(-1, 131072))
                chunk = reader.read1(read_size)
                if not chunk and read_size:
                    break

                chunks.append(chunk)

        self.assertEqual(b"".join(chunks), original)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        streaming=strategies.booleans(),
        source_read_size=strategies.integers(1, 1048576),
        read_sizes=strategies.data(),
    )
    def test_stream_source_readinto1_variance(
        self, original, level, streaming, source_read_size, read_sizes
    ):
        """readinto1() into randomly sized buffers round-trips the data."""
        cctx = zstd.ZstdCompressor(level=level)

        if streaming:
            source = io.BytesIO()
            writer = cctx.stream_writer(source)
            writer.write(original)
            writer.flush(zstd.FLUSH_FRAME)
            source.seek(0)
        else:
            frame = cctx.compress(original)
            source = io.BytesIO(frame)

        dctx = zstd.ZstdDecompressor()

        chunks = []
        with dctx.stream_reader(source, read_size=source_read_size) as reader:
            while True:
                read_size = read_sizes.draw(strategies.integers(1, 131072))
                b = bytearray(read_size)
                count = reader.readinto1(b)

                if not count:
                    break

                chunks.append(bytes(b[0:count]))

        self.assertEqual(b"".join(chunks), original)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        source_read_size=strategies.integers(1, 1048576),
        seek_amounts=strategies.data(),
        read_sizes=strategies.data(),
    )
    def test_relative_seeks(
        self, original, level, source_read_size, seek_amounts, read_sizes
    ):
        """Data read after a relative seek matches the same offset in the original."""
        cctx = zstd.ZstdCompressor(level=level)
        frame = cctx.compress(original)

        dctx = zstd.ZstdDecompressor()

        with dctx.stream_reader(frame, read_size=source_read_size) as reader:
            while True:
                amount = seek_amounts.draw(strategies.integers(0, 16384))
                reader.seek(amount, os.SEEK_CUR)

                offset = reader.tell()
                read_amount = read_sizes.draw(strategies.integers(1, 16384))
                chunk = reader.read(read_amount)

                if not chunk:
                    break

                self.assertEqual(original[offset : offset + len(chunk)], chunk)

    @hypothesis.settings(
        suppress_health_check=[hypothesis.HealthCheck.large_base_example]
    )
    @hypothesis.given(
        originals=strategies.data(),
        frame_count=strategies.integers(min_value=2, max_value=10),
        level=strategies.integers(min_value=1, max_value=5),
        source_read_size=strategies.integers(1, 1048576),
        read_sizes=strategies.data(),
    )
    def test_multiple_frames(
        self, originals, frame_count, level, source_read_size, read_sizes
    ):
        """read_across_frames=True reads a multi-frame stream as one payload."""
        cctx = zstd.ZstdCompressor(level=level)
        source = io.BytesIO()
        buffer = io.BytesIO()
        writer = cctx.stream_writer(buffer)

        # Write several independent frames; `source` tracks the concatenated
        # uncompressed payload for the final comparison.
        for i in range(frame_count):
            data = originals.draw(strategies.sampled_from(random_input_data()))
            source.write(data)
            writer.write(data)
            writer.flush(zstd.FLUSH_FRAME)

        dctx = zstd.ZstdDecompressor()
        buffer.seek(0)

        reader = dctx.stream_reader(
            buffer, read_size=source_read_size, read_across_frames=True
        )

        chunks = []

        while True:
            read_amount = read_sizes.draw(strategies.integers(-1, 16384))
            chunk = reader.read(read_amount)

            if not chunk and read_amount:
                break

            chunks.append(chunk)

        self.assertEqual(source.getvalue(), b"".join(chunks))
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
@make_cffi
class TestDecompressor_stream_writer_fuzzing(unittest.TestCase):
    """Fuzz ZstdDecompressor.stream_writer() with varying chunk sizes."""

    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        write_size=strategies.integers(min_value=1, max_value=8192),
        input_sizes=strategies.data(),
    )
    def test_write_size_variance(self, original, level, write_size, input_sizes):
        """Feeding a frame in random-sized pieces reproduces the original data."""
        compressed = io.BytesIO(zstd.ZstdCompressor(level=level).compress(original))
        sink = NonClosingBytesIO()

        dctx = zstd.ZstdDecompressor()
        with dctx.stream_writer(sink, write_size=write_size) as writer:
            piece = compressed.read(input_sizes.draw(strategies.integers(1, 4096)))
            while piece:
                writer.write(piece)
                piece = compressed.read(input_sizes.draw(strategies.integers(1, 4096)))

        self.assertEqual(sink.getvalue(), original)
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
@make_cffi
class TestDecompressor_copy_stream_fuzzing(unittest.TestCase):
    """Fuzz ZstdDecompressor.copy_stream() read/write chunk sizes."""

    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        read_size=strategies.integers(min_value=1, max_value=8192),
        write_size=strategies.integers(min_value=1, max_value=8192),
    )
    def test_read_write_size_variance(self, original, level, read_size, write_size):
        """copy_stream() round-trips regardless of the chunk sizes used."""
        compressed = zstd.ZstdCompressor(level=level).compress(original)
        src = io.BytesIO(compressed)
        dst = io.BytesIO()

        zstd.ZstdDecompressor().copy_stream(
            src, dst, read_size=read_size, write_size=write_size
        )

        self.assertEqual(dst.getvalue(), original)
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
@make_cffi
class TestDecompressor_decompressobj_fuzzing(unittest.TestCase):
    """Fuzz the decompressobj() incremental decompression API."""

    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        chunk_sizes=strategies.data(),
    )
    def test_random_input_sizes(self, original, level, chunk_sizes):
        """Random-sized input chunks decompress to the original payload."""
        frame = zstd.ZstdCompressor(level=level).compress(original)
        source = io.BytesIO(frame)

        dobj = zstd.ZstdDecompressor().decompressobj()

        pieces = []
        while True:
            data = source.read(chunk_sizes.draw(strategies.integers(1, 4096)))
            if not data:
                break

            pieces.append(dobj.decompress(data))

        self.assertEqual(b"".join(pieces), original)

    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        write_size=strategies.integers(
            min_value=1, max_value=4 * zstd.DECOMPRESSION_RECOMMENDED_OUTPUT_SIZE
        ),
        chunk_sizes=strategies.data(),
    )
    def test_random_output_sizes(self, original, level, write_size, chunk_sizes):
        """Random output buffer sizes still yield the original payload."""
        frame = zstd.ZstdCompressor(level=level).compress(original)
        source = io.BytesIO(frame)

        dobj = zstd.ZstdDecompressor().decompressobj(write_size=write_size)

        pieces = []
        while True:
            data = source.read(chunk_sizes.draw(strategies.integers(1, 4096)))
            if not data:
                break

            pieces.append(dobj.decompress(data))

        self.assertEqual(b"".join(pieces), original)
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
@make_cffi
class TestDecompressor_read_to_iter_fuzzing(unittest.TestCase):
    """Fuzz read_to_iter() read/write chunk sizes."""

    @hypothesis.given(
        original=strategies.sampled_from(random_input_data()),
        level=strategies.integers(min_value=1, max_value=5),
        read_size=strategies.integers(min_value=1, max_value=4096),
        write_size=strategies.integers(min_value=1, max_value=4096),
    )
    def test_read_write_size_variance(self, original, level, read_size, write_size):
        """Concatenating the iterated chunks reproduces the original data."""
        frame = zstd.ZstdCompressor(level=level).compress(original)
        stream = io.BytesIO(frame)

        pieces = zstd.ZstdDecompressor().read_to_iter(
            stream, read_size=read_size, write_size=write_size
        )

        self.assertEqual(b"".join(pieces), original)
@unittest.skipUnless("ZSTD_SLOW_TESTS" in os.environ, "ZSTD_SLOW_TESTS not set")
class TestDecompressor_multi_decompress_to_buffer_fuzzing(unittest.TestCase):
    """Fuzz multi_decompress_to_buffer() with buffer and list inputs."""

    @hypothesis.given(
        original=strategies.lists(
            strategies.sampled_from(random_input_data()), min_size=1, max_size=1024
        ),
        threads=strategies.integers(min_value=1, max_value=8),
        use_dict=strategies.booleans(),
    )
    def test_data_equivalence(self, original, threads, use_dict):
        """Every decompressed frame matches its input, with and without a dict."""
        extra = {}
        if use_dict:
            # Use the first sample as a (degenerate) compression dictionary.
            extra["dict_data"] = zstd.ZstdCompressionDict(original[0])

        cctx = zstd.ZstdCompressor(
            level=1, write_content_size=True, write_checksum=True, **extra
        )

        if not hasattr(cctx, "multi_compress_to_buffer"):
            self.skipTest("multi_compress_to_buffer not available")

        frames_buffer = cctx.multi_compress_to_buffer(original, threads=-1)

        dctx = zstd.ZstdDecompressor(**extra)

        def check(frames):
            decoded = dctx.multi_decompress_to_buffer(frames)
            self.assertEqual(len(decoded), len(original))
            for got, expected in zip(decoded, original):
                self.assertEqual(got.tobytes(), expected)

        # Decompress straight from the buffer collection...
        check(frames_buffer)
        # ...and from a plain list of frame bytes.
        check([f.tobytes() for f in frames_buffer])
| [
"[email protected]"
] | |
50b0d0d43f43bcda2ef5a05062a45b32b719010f | 4bd5cdb67fdd6a6f0ceb3af025ceaf977b661273 | /gconv_experiments/groupy/garray/D4h_array.py | 1c0b4b98530c21dfa94b6937ccf955673ddf5fa0 | [] | no_license | andreiqv/gconv | 93d7d313cdc78e2bfefd53820918293526fc4680 | 23f9ec62b119c64cc87f8727cc1e409a469db0f1 | refs/heads/master | 2020-05-07T21:05:28.840973 | 2019-04-11T23:25:31 | 2019-04-11T23:25:31 | 180,890,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,376 | py | import numpy as np
from groupy.garray.finitegroup import FiniteGroup
from groupy.garray.matrix_garray import MatrixGArray
from groupy.garray.D4ht_array import D4htArray
from groupy.garray.Z3_array import Z3Array
"""
Implementation of dihedral finite group D4h, consisting of 16 elements in total.
These are the elements of C4h, with added reflection.
Int parameterisation contains an extra parameter, m (in {0, 1}) to represent this reflection.
"""
class D4hArray(MatrixGArray):
    """GArray of elements of the dihedral group D4h (16 elements).

    Parameterizations:
      'int'  -- (y, z, m): y half-turns about the y-axis, z quarter-turns
                about the z-axis, reflection bit m in {0, 1}.
      'mat'  -- 3x3 integer matrices.
      'hmat' -- 4x4 homogeneous matrices.
    """
    parameterizations = ['int', 'mat', 'hmat']
    _g_shapes = {'int': (3,), 'mat': (3, 3), 'hmat': (4, 4)}
    _left_actions = {}
    _reparameterizations = {}
    _group_name = 'D4h'

    def __init__(self, data, p='int'):
        data = np.asarray(data)
        # NOTE(review): np.int was removed in NumPy 1.24; this module appears
        # to target Python 2 (xrange) and an older NumPy — confirm before
        # upgrading dependencies.
        assert data.dtype == np.int
        # classes OArray can be multiplied with
        self._left_actions[D4hArray] = self.__class__.left_action_hmat
        self._left_actions[D4htArray] = self.__class__.left_action_hmat
        self._left_actions[Z3Array] = self.__class__.left_action_vec
        super(D4hArray, self).__init__(data, p)
        self.elements = self.get_elements()

    def mat2int(self, mat_data):
        '''
        Transforms 3x3 matrix representation to int representation.
        To handle any size and shape of mat_data, the original mat_data
        is reshaped to a long list of 3x3 matrices, converted to a list of
        int representations, and reshaped back to the original mat_data shape.

        mat-2-int is achieved by taking the matrix, and looking up whether it
        exists in the element list. If not, the matrix should be multiplied with -1
        to retrieve the reflection. The resulting matrix can be looked up in the
        element list, and that index can be converted to y and z.
        '''
        input = mat_data.reshape((-1, 3, 3))
        data = np.zeros((input.shape[0], 3), dtype=np.int)
        for i in xrange(input.shape[0]):
            mat = input[i]
            # check for reflection
            if mat.tolist() not in self.elements:
                mat = np.array(mat) * -1
                data[i, 2] = 1
            # determine z and y
            # element index encodes (y, z) as index = y * 4 + z
            index = self.elements.index(mat.tolist())
            z = int(index % 4)
            y = int((index - z) / 4)
            data[i, 0] = y
            data[i, 1] = z
        data = data.reshape(mat_data.shape[:-2] + (3,))
        return data

    def int2mat(self, int_data):
        '''
        Transforms integer representation to 3x3 matrix representation.
        Original int_data is flattened and later reshaped back to its original
        shape to handle any size and shape of input.
        '''
        # rotations over y, z and reflection
        y = int_data[..., 0].flatten()
        z = int_data[..., 1].flatten()
        m = int_data[..., 2].flatten()
        data = np.zeros((len(y),) + (3, 3), dtype=np.int)

        for j in xrange(len(y)):
            index = (y[j] * 4) + z[j]
            mat = self.elements[index]
            mat = np.array(mat) * ((-1) ** m[j])  # mirror if reflection
            data[j, 0:3, 0:3] = mat.tolist()

        data = data.reshape(int_data.shape[:-1] + (3, 3))
        return data

    def _multiply(self, element, generator, times):
        '''
        Helper function to multiply an _element_ with a _generator_
        _times_ number of times.
        '''
        element = np.array(element)
        for i in range(times):
            element = np.dot(element, np.array(generator))
        return element

    def get_elements(self):
        '''
        Function to generate a list containing elements of group D4h,
        similar to get_elements() of BArray.
        Elements are stored as lists rather than numpy arrays to enable
        lookup through self.elements.index(x).
        '''
        # specify generators
        g1 = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, -1]])  # 180 degrees over y
        g2 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])  # 90 degrees over z
        element_list = []
        element = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])  # starting point = identity matrix
        for i in range(0, 2):
            element = self._multiply(element, g1, i)
            for j in range(0, 4):
                element = self._multiply(element, g2, j)
                element_list.append(element.tolist())
        return element_list
class D4hGroup(FiniteGroup, D4hArray):
    """The finite group D4h: all 16 elements in the int parameterization."""

    def __init__(self):
        # Enumerate every (y-rotation, z-rotation, reflection) triple.
        elements = [[y, z, m]
                    for y in xrange(2)
                    for z in xrange(4)
                    for m in xrange(2)]
        D4hArray.__init__(self, data=np.array(elements), p='int')
        FiniteGroup.__init__(self, D4hArray)

    def factory(self, *args, **kwargs):
        """Construct a D4hArray; used by the FiniteGroup machinery."""
        return D4hArray(*args, **kwargs)
# Module-level singleton instance of the D4h group.
D4h = D4hGroup()
def rand(size=()):
    '''
    Returns a D4hArray of shape size, with randomly chosen elements in int
    parameterization: y in [0, 2), z in [0, 4), reflection m in {0, 1}.
    '''
    # dtype=int replaces np.int: the alias was deprecated in NumPy 1.20 and
    # removed in 1.24, and it was exactly the builtin int.
    data = np.zeros(size + (3,), dtype=int)
    data[..., 0] = np.random.randint(0, 2, size)
    data[..., 1] = np.random.randint(0, 4, size)
    data[..., 2] = np.random.randint(0, 2, size)
    return D4hArray(data=data, p='int')
def identity(p='int'):
    '''
    Returns the identity element (the 3x3 identity matrix), reparameterized
    to the requested parameterization `p`.
    '''
    # dtype=int replaces the np.int alias removed in NumPy 1.24.
    li = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    e = D4hArray(data=np.array(li, dtype=int), p='mat')
    return e.reparameterize(p)
| [
"[email protected]"
] | |
ef955cf11a1cd96660828ba53df533af7add7417 | a9b5bc48a89329aa44cb4dd63ce47a3c0dfc90ba | /tests/test_withings_object.py | 8fa9c36a34257583ebac5c15851c1621aa312ca8 | [
"MIT"
] | permissive | randi120/python-withings | d050a263f5c500ad258072dbb3661a43dd225de3 | 016bb3cc2d62a4e2813df422829eba21530570bc | refs/heads/master | 2021-01-22T13:47:49.355343 | 2014-12-26T00:47:12 | 2014-12-26T00:47:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 938 | py | import time
import unittest
from datetime import datetime
from withings import WithingsObject
class TestWithingsObject(unittest.TestCase):
    """Tests for attribute hydration on WithingsObject."""

    def test_attributes(self):
        """ISO dates parse, epoch ints convert, and unparseable dates pass through."""
        payload = {
            "date": "2013-04-10",
            "string": "FAKE_STRING",
            "integer": 55555,
            "float": 5.67
        }
        obj = WithingsObject(payload)
        self.assertEqual(obj.date.strftime('%Y-%m-%d'), payload['date'])
        self.assertEqual(obj.string, payload['string'])
        self.assertEqual(obj.integer, payload['integer'])
        self.assertEqual(obj.float, payload['float'])

        # An integer date is treated as a Unix epoch timestamp.
        epoch = {"date": 1409596058}
        obj = WithingsObject(epoch)
        self.assertEqual(time.mktime(obj.date.timetuple()), epoch['date'])

        # A date that cannot be parsed is kept verbatim.
        funky = {"date": "weird and wacky date format"}
        obj = WithingsObject(funky)
        self.assertEqual(obj.date, funky['date'])
| [
"[email protected]"
] | |
cc1454d122573184c132666c2fe8f7e97e045849 | d8416cd4c8f532809c4c9d368d43fa773b3b198c | /torchsupport/flex/examples/cifar_tdre.py | 546881ac3c571f5f93a027d84caa06030768d4c4 | [
"MIT"
] | permissive | DavidMetzIMT/torchsupport | a53a0d532b7542d81dc158d3d67f195cbce86bf9 | a0ca719c820a4895e98091c52e43c5300e1a71a3 | refs/heads/master | 2023-05-28T21:45:09.302210 | 2021-06-14T17:30:58 | 2021-06-14T17:30:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,151 | py | from functools import partial
from torchsupport.data.namedtuple import namespace
import torch
import torch.nn as nn
import torch.nn.functional as func
from torch.distributions import Normal
from torch.utils.data import Dataset
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchsupport.modules import ReZero
from torchsupport.training.samplers import Langevin
from torchsupport.utils.argparse import parse_options
from torchsupport.flex.log.log_types import LogImage
from torchsupport.flex.context.context import TrainingContext
from torchsupport.flex.data_distributions.data_distribution import DataDistribution
from torchsupport.flex.tasks.energy.density_ratio import direct_mixing, noise_contrastive_estimation, probability_surface_estimation, random_dim_mixing, tdre_mixing, tdre_step, tnce_step, independent_mixing, vp_mixing
from torchsupport.flex.training.density_ratio import telescoping_density_ratio_training
def valid_callback(args, ctx: TrainingContext=None):
    """Log the full sample batch plus one image gallery per predicted class."""
    ctx.log(images=LogImage(args.sample))
    predicted_classes = args.prediction.argmax(dim=1)
    for class_index in range(10):
        matches = args.sample[predicted_classes == class_index]
        if matches.size(0) == 0:
            continue
        ctx.log(**{f"classified {class_index}": LogImage(matches)})
def generate_step(energy, base, integrator: Langevin=None, ctx=None):
    """Draw samples with annealed Langevin dynamics and log them as images.

    Starts from base-distribution noise and integrates the level-conditioned
    energy from the highest noise level (just below 1.0) down to 0.0 in steps
    of 0.01, tethering each step to the previous sample via ConditionalEnergy.

    Args:
        energy: level-conditioned energy model.
        base: base distribution exposing ``sample(batch_size)``.
        integrator: Langevin sampler used at each level.
        ctx: training context providing ``batch_size`` and ``log``.
    """
    sample = base.sample(ctx.batch_size)
    # Build the level schedule on the sample's own device. The previous code
    # used the script-level ``opt`` global, which only exists when this module
    # runs as __main__ and raised NameError when imported.
    levels = torch.arange(0.0, 1.0, 0.01, device=sample.device)
    for level in reversed(levels):
        this_level = level * torch.ones(sample.size(0), device=sample.device)
        sample = integrator.integrate(
            ConditionalEnergy(energy, sample, shift=0.025), sample, this_level, None
        )
    # Map from model space [-1, 1] back to image space [0, 1] for logging.
    result = ((sample + 1) / 2).clamp(0, 1)
    ctx.log(samples=LogImage(result))
class CIFAR10Dataset(Dataset):
    """Wraps an (image, label) dataset for EBM training: drops the label,
    adds 1/255-scale Gaussian dequantization noise and rescales pixel
    values from [0, 1] to [-1, 1]."""

    def __init__(self, data):
        self.data = data

    def __getitem__(self, index):
        image, _ = self.data[index]
        noisy = image + torch.randn_like(image) / 255
        return noisy * 2 - 1, []

    def __len__(self):
        return len(self.data)
class Base(nn.Module):
    """Gaussian base distribution over 3x32x32 images with learnable
    per-channel mean and log-standard-deviation parameters.

    Note: ``sample`` currently returns standard-normal noise (the learned
    distribution is constructed but its rsample call is disabled) and
    ``log_prob`` returns zeros, matching the original behavior.
    """

    def __init__(self):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(3, 1, 1))
        self.logv = nn.Parameter(torch.zeros(3, 1, 1))

    def sample(self, batch_size):
        learned = Normal(
            self.mean.expand(3, 32, 32),
            self.logv.exp().expand(3, 32, 32),
        )
        del learned  # built for parity with the original; rsample is disabled
        return torch.randn(batch_size, 3, 32, 32, device=self.mean.device)

    def log_prob(self, data, condition):
        # Flat base log-density: zeros shaped like the forward energy.
        return torch.zeros_like(self(data, condition)[0])

    def forward(self, data, condition):
        gaussian = Normal(self.mean, self.logv.exp())
        per_pixel = gaussian.log_prob(data)
        flattened = per_pixel.view(*per_pixel.shape[:-3], -1)
        return flattened.sum(dim=-1, keepdim=True), namespace(
            distribution=gaussian
        )
class SineEmbedding(nn.Module):
    """Embed a batch of scalars (e.g. noise levels) into ``size``-dimensional
    vectors via ``depth`` linear layers, each followed by a sine activation."""

    def __init__(self, size, depth=2):
        super().__init__()
        layers = [nn.Linear(1, size)]
        layers.extend(nn.Linear(size, size) for _ in range(depth - 1))
        self.blocks = nn.ModuleList(layers)

    def forward(self, time):
        embedded = time[:, None]
        for layer in self.blocks:
            embedded = torch.sin(layer(embedded))
        return embedded
class ResBlock(nn.Module):
    """Residual conv block whose residual and skip paths are both modulated
    (scaled and shifted) by sine embeddings of the noise level."""

    def __init__(self, size):
        super().__init__()
        # Each embedding emits 2 * size values: a per-channel scale and bias.
        self.condify = SineEmbedding(2 * size)
        self.skip = SineEmbedding(2 * size)
        self.blocks = nn.ModuleList([
            nn.Conv2d(size, size, 3, padding=1)
            for idx in range(2)
        ])
        # ReZero gate: the residual contribution starts at zero and is learned.
        self.zero = ReZero(size)

    def forward(self, inputs, levels):
        # Level embeddings, broadcast over spatial dimensions.
        cond = self.condify(levels)
        cond = cond.view(*cond.shape, 1, 1)
        skip = self.skip(levels)
        skip = skip.view(*skip.shape, 1, 1)
        # Split each embedding into (scale, bias) halves along channels.
        scale, bias = cond.chunk(2, dim=1)
        skip_scale, skip_bias = skip.chunk(2, dim=1)
        # Residual branch: conv -> silu -> modulate -> conv.
        out = func.silu(self.blocks[0](inputs))
        out = scale * out + bias
        out = self.blocks[1](out)
        # Skip branch is modulated as well before the gated addition.
        inputs = skip_scale * inputs + skip_bias
        return self.zero(inputs, out)
class Energy(nn.Module):
    """Level-conditioned convolutional energy model over 3x32x32 images.

    Four conv + ResBlock stages with 2x downsampling each, followed by a
    quadratic-plus-linear head on globally pooled features.
    """

    def __init__(self, base):
        super().__init__()
        # Base distribution is stored but not used in forward here.
        self.base = base
        self.conv = nn.ModuleList([
            nn.Conv2d(3, 32, 3, padding=1),
            nn.Conv2d(32, 64, 3, padding=1),
            nn.Conv2d(64, 128, 3, padding=1),
            nn.Conv2d(128, 256, 3, padding=1)
        ])
        self.res = nn.ModuleList([
            ResBlock(32),
            ResBlock(64),
            ResBlock(128),
            ResBlock(256),
        ])
        self.W = nn.Linear(256, 256)
        self.b = nn.Linear(256, 1)

    def forward(self, inputs, levels, *args):
        out = inputs
        for res, conv in zip(self.res, self.conv):
            out = func.silu(conv(out))
            out = res(out, levels)
            # 2**2 * avg_pool2d(out, 2) equals sum pooling over each 2x2
            # window — presumably to keep the energy magnitude; confirm.
            out = 2 ** 2 * func.avg_pool2d(out, 2)
        # Global sum pooling (average times the number of spatial cells).
        features = out.size(-1) ** 2 * func.adaptive_avg_pool2d(out, 1)
        features = features.view(features.size(0), -1)
        # Energy head: x^T W x + b^T x, one scalar per sample.
        quadratic = (features * self.W(features)).sum(dim=1, keepdim=True)
        linear = self.b(features)
        return quadratic + linear
class TotalEnergy(nn.Module):
    """Sum a level-conditioned energy over a fixed set of noise levels for
    every element of a batch, yielding one scalar per sample."""

    def __init__(self, energy, levels):
        super().__init__()
        self.energy = energy
        self.levels = levels

    def forward(self, data: torch.Tensor, *args):
        batch = data.size(0)
        num_levels = len(self.levels)
        # Repeat each sample once per level and tile the level vector once
        # per sample, then evaluate the energy on the expanded batch.
        expanded = data.repeat_interleave(num_levels, dim=0)
        tiled_levels = torch.cat(batch * [self.levels], dim=0)
        factors = self.energy(expanded, tiled_levels, *args)
        # NOTE(review): this reshape groups rows in chunks of `batch`, not
        # chunks of `num_levels`; verify the intended sample/level pairing.
        return factors.view(-1, batch, 1).sum(dim=0)
class ConditionalEnergy(nn.Module):
    """Augment an energy with a Gaussian tether to an anchor sample.

    The mean log-density of ``data`` under N(origin, shift) is added to the
    raw energy, penalizing samples that stray far from ``origin``.
    """

    def __init__(self, energy, origin, shift=0.025):
        super().__init__()
        self.energy = energy
        # Detach: the anchor is a constant, no gradients flow into it.
        self.origin = origin.detach()
        self.shift = shift

    def forward(self, data, level, *args):
        base_energy = self.energy(data, level)
        anchor = Normal(self.origin, self.shift)
        log_density = anchor.log_prob(data)
        penalty = log_density.view(log_density.size(0), -1).mean(dim=1, keepdim=True)
        return base_energy + penalty
if __name__ == "__main__":
    # CLI options; the keyword values below are the defaults.
    opt = parse_options(
        "CIFAR10 EBM using TNCE in flex.",
        path="flexamples/cifar10-tdre-10",
        device="cuda:0",
        batch_size=8,
        max_epochs=1000,
        report_interval=1000
    )

    # Wrap CIFAR10 in the dequantizing dataset and a batched distribution.
    cifar10 = CIFAR10("examples/", download=False, transform=ToTensor())
    data = CIFAR10Dataset(cifar10)
    data = DataDistribution(
        data, batch_size=opt.batch_size,
        device=opt.device
    )

    base = Base().to(opt.device)
    energy = Energy(base).to(opt.device)
    # Noise levels used for telescoping density-ratio estimation.
    levels = torch.arange(0.0, 1.0, 0.01, device=opt.device)

    training = telescoping_density_ratio_training(
        energy, base, data,
        mixing=partial(
            independent_mixing,
            mixing=tdre_mixing,
            levels=levels
        ),
        optimizer_kwargs=dict(lr=1e-4),
        telescoping_step=tdre_step,
        train_base=False,
        path=opt.path,
        device=opt.device,
        batch_size=opt.batch_size,
        max_epochs=opt.max_epochs,
        report_interval=opt.report_interval
    )

    # add generating images every few steps:
    integrator = Langevin(
        rate=-0.01, noise=0.01,
        steps=5, max_norm=None,
        clamp=(-1, 1)
    )
    training.add(
        generate_step=partial(
            generate_step, energy=energy,
            base=base, integrator=integrator,
            ctx=training
        ),
        every=opt.report_interval
    )

    # training.get_step("tdre_step").extend(
    #     lambda args, ctx=None:
    #     ctx.log(real_images=LogImage(args.real_data.clamp(0, 1)))
    # )

    # Resume from a checkpoint if one exists, then run training.
    training.load()
    training.train()
| [
"[email protected]"
] | |
fa79356736541e36a07ddcd18a65b5cb23c60ad7 | b2c24abff86b28ca8a495b3a3c3227f070737aa2 | /parlai/tasks/opensubtitles/build_2018.py | 271858012beab88fe3ac123c42db3a991c6a3074 | [
"MIT"
] | permissive | hengyicai/AdaND | d5dda1b2fcd2abd17be6603de632f0515382b37b | 5e3fefb1cf40c42215a37246efc64958ae6db005 | refs/heads/master | 2023-09-01T07:38:49.076947 | 2020-10-19T04:58:00 | 2020-10-19T04:58:00 | 204,633,631 | 10 | 2 | MIT | 2023-08-11T19:52:23 | 2019-08-27T06:20:39 | Python | UTF-8 | Python | false | false | 11,239 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import glob
import gzip
import multiprocessing
import os
import re
import sys
import time
import tqdm
import xml.etree.ElementTree as ET
NUM_MOVIE_FOLDERS = 140044
NUM_SUBTITLES_FILES = 446612
MAX_TIME_DIFFERENCE_S = 2
MIN_WORD_LENGTH = 2
MAX_WORD_LENGTH = 20
# remove brackets
CLEAN_BRACKETS_REGEX = re.compile(
r'<!--.*?-->|<[^>]*>|\([^\)]*\)|\[[^\]]*\]|\{[^\}]*\}|##|~'
)
# Usually, unbalanced brackets correspond to very noisy sentences
# '#' is usually pretty bad and means lyrics of the song
BRACKETS_CHARACTERS = ['[', ']', '(', ')', '{', '}', '<', '>', '#']
MULTI_WHITESPACES_REGEX = re.compile(r'\s+')
# Existing apostrophe tokenization in Open Subtitles is not compatible with nltk
APOSTROPHE_REPLACEMENT_REGEX = [
(re.compile(r"(\s?)n(\s?)'(\s?)t(\s|$)"), "\\1n't\\4"),
(re.compile(r"'(\s?)(s|re|em|im|bout|cause|ve|d|ll|ne)(\s+|$)"), " '\\2\\3"),
# it's a common (in OpenSubtitles) spelling error to use 'il instead of 'll
(re.compile(r"'(\s?)il(\s|$)"), " 'll\\2"),
(re.compile(r"(\s|^)i(\s?)'(\s?)(m|mm)(\s|$)"), "\\1i 'm\\5"),
(re.compile(r"in(\s?)'(\s|$)"), "ing\\2"),
(re.compile(r"(\s|^)ma(\s?)'(\s?)am(\s|$)"), "\\1ma'am\\4"),
(re.compile(r"(\s|^)c(\s?)'(\s?)mon(\s|$)"), "\\1c'mon\\4"),
(re.compile(r"(\s|^)o(\s?)'(\s?)clock(\s|$)"), "\\1o'clock\\4"),
(re.compile(r"(\s|^)y(\s?)'(\s?)all(\s|$)"), "\\1y'all\\4"),
]
# Some cleaning steps are taken from
CLEANUP_REGEX_RULES = [
# remove speaker tag "xxx: "
(re.compile(r'^\s*[A-z]*\s*:'), ''),
# remove unnecessary symbols
(re.compile(r"-{2,}"), ' '),
# delete a space right before a period for titles
(re.compile(r'(?<=( mr| jr| ms| dr| st|mrs)) \.'), '. '),
]
CLEANUP_REPLACE_RULES = [
('"', ' '),
("``", " "),
("''", " "),
("% %", " "),
("i̇", "i"),
]
def get_movie_id(filename_path):
    """Return the numeric movie id, i.e. the name of the directory that
    directly contains the given subtitle file."""
    parent_dir = os.path.dirname(filename_path)
    return int(os.path.basename(parent_dir))
# OpenSubtitles2016 contains have several subtitles per movie,
# stored in a separate folders.
# We gather all subtitles files based on the movie they correspond to
# and apply deduplication for the extracted replicas
def get_list_of_files(top_path):
    """Walk ``top_path`` and group the absolute paths of all .xml subtitle
    files by the movie id encoded in their parent directory name."""
    files_by_movie = {}
    for dirpath, _dirs, filenames in os.walk(top_path):
        for name in filenames:
            if not name.endswith('.xml'):
                continue
            full_path = os.path.realpath(os.path.join(dirpath, name))
            assert os.path.isfile(full_path), 'Bad file ' + full_path
            movie_id = get_movie_id(full_path)
            files_by_movie.setdefault(movie_id, []).append(full_path)
    return files_by_movie
def parse_xml(filepath):
    """Parse an XML subtitle file, transparently handling gzip (.gz) files."""
    if os.path.splitext(filepath)[1] == '.gz':
        with gzip.open(filepath, 'r') as handle:
            return ET.parse(handle)
    return ET.parse(filepath)
def normalize_whitespaces(sentence):
    """Collapse runs of whitespace into single spaces and trim both ends."""
    return ' '.join(sentence.split())
def normalize_apostrophe(sentence):
    """Re-tokenize apostrophes so contractions follow nltk conventions."""
    normalized = normalize_whitespaces(sentence)
    for pattern, replacement in APOSTROPHE_REPLACEMENT_REGEX:
        normalized = pattern.sub(replacement, normalized)
    return normalized
def clean_text(words):
    """Join a token list into one cleaned, lower-cased sentence.

    Returns None when the utterance looks too noisy to keep: a speaker tag,
    unbalanced brackets or double quotes, or a token count outside
    [MIN_WORD_LENGTH, MAX_WORD_LENGTH].
    """
    # A trailing colon marks a speaker tag, not an utterance.
    if len(words) > 0 and words[-1] == ':':
        return None
    sentence = ' '.join(words).strip(' -').lower()
    sentence = CLEAN_BRACKETS_REGEX.sub('', sentence)
    # Any bracket character left after removal means brackets were unbalanced.
    if len([ch for ch in BRACKETS_CHARACTERS if ch in sentence]) > 0:
        return None
    sentence = sentence.replace('\\\'', '\'')
    if sentence.count('"') % 2 == 1:
        # There are unmatched double-quotes.
        # Usually, it means a quote got splitted into separate utterances,
        # so it's bad example of a dialog
        return None
    sentence = normalize_apostrophe(sentence)
    for (regex, replacement) in CLEANUP_REGEX_RULES:
        sentence = regex.sub(replacement, sentence)
    for (pattern, replacement) in CLEANUP_REPLACE_RULES:
        sentence = sentence.replace(pattern, replacement)
    words = normalize_whitespaces(sentence).split()
    # Keep the sentence only if it contains at least one word character and
    # an acceptable number of tokens.
    if (
        len(words) > 0
        and any(map(lambda k: re.search(r'\w', k) is not None, words))
        and len(words) >= MIN_WORD_LENGTH
        and len(words) <= MAX_WORD_LENGTH
    ):
        return ' '.join(words)
    else:
        return None
def parse_time_str(time_value_str):
    """Parse an 'HH:MM:SS,mmm' subtitle timestamp into whole seconds.

    Returns None when the string is missing, has the wrong shape, or the
    numeric fields do not parse. Milliseconds are ignored.
    """
    well_formed = (
        time_value_str is not None
        and len(time_value_str) == 12
        and time_value_str[2] == ':'
        and time_value_str[5] == ':'
        and time_value_str[8] == ','
    )
    if not well_formed:
        return None
    try:
        hours = int(time_value_str[0:2])
        minutes = int(time_value_str[3:5])
        seconds = int(time_value_str[6:8])
    except ValueError:
        return None
    return hours * 3600 + minutes * 60 + seconds
def extract_data_from_xml(xml_object):
    """Yield conversations (lists of cleaned sentences) from a parsed
    subtitle XML tree.

    Consecutive subtitles are grouped into one conversation as long as the
    gap between them is at most MAX_TIME_DIFFERENCE_S seconds; only groups
    with at least two sentences are yielded.
    """
    previous_end_time = -1000
    conversation = []
    for sentence_node in xml_object.getroot():
        if sentence_node.tag != 's':
            continue
        words = []
        start_time, end_time = None, None
        for node in sentence_node:
            if node.tag == 'time':
                time_value = parse_time_str(node.get('value'))
                if time_value is None:
                    continue
                # Time ids end in 'S' (start) or 'E' (end); keep the widest
                # interval observed for this sentence.
                if node.get('id')[-1] == 'S':
                    start_time = (
                        time_value
                        if start_time is None
                        else min(time_value, start_time)
                    )
                elif node.get('id')[-1] == 'E':
                    end_time = (
                        time_value if end_time is None else max(time_value, end_time)
                    )
                else:
                    raise Exception('Unknown time-id for node: %s' % node)
            elif node.tag == 'w':
                # Word tokens; skip empty text nodes.
                if node.text is not None and len(node.text) > 0:
                    words.append(node.text)
            else:
                pass
        sentence = clean_text(words)
        # Sentences without timestamps inherit the previous end time.
        start_time = start_time or previous_end_time
        end_time = end_time or previous_end_time
        # add to the conversation
        # flush and start new conversation
        if (
            sentence is not None
            and start_time - previous_end_time <= MAX_TIME_DIFFERENCE_S
        ):
            conversation.append(sentence)
        else:
            if len(conversation) > 1:
                yield conversation
            conversation = []
            if sentence is not None:
                conversation.append(sentence)
        previous_end_time = max(start_time, end_time)
def conversation_to_fb_format(conversation):
    """Render a conversation as numbered fbformat dialog lines, pairing
    consecutive utterances as '<n> prompt\\treply' (an odd trailing
    utterance is emitted without a reply)."""
    assert len(conversation) > 1
    lines = []
    for pair_start in range(0, len(conversation), 2):
        line_number = pair_start // 2 + 1
        prompt = conversation[pair_start]
        if pair_start + 1 < len(conversation):
            reply = conversation[pair_start + 1]
            lines.append('%d %s\t%s' % (line_number, prompt, reply))
        else:
            lines.append('%d %s' % (line_number, prompt))
    return '\n'.join(lines)
def conversation_to_basic_format(conversation):
    """Render a conversation as flat single-turn '1 prompt\\treply' lines,
    one per adjacent utterance pair."""
    assert len(conversation) > 1
    return '\n'.join(
        '1 %s\t%s' % (conversation[i], conversation[i + 1])
        for i in range(len(conversation) - 1)
    )
class DataProcessor(object):
    """Callable worker that turns one movie's subtitle files into a block of
    deduplicated dialog lines; picklable, so usable with multiprocessing."""

    def __init__(self, use_history):
        # use_history: emit numbered fbformat dialogs instead of flat
        # single-turn pairs.
        self.use_history = use_history

    def __call__(self, movie_id_with_files):
        movie_id, files = movie_id_with_files
        # Set-based dedup: several subtitle files may exist for one movie.
        data = set()
        for filepath in files:
            try:
                xml_object = parse_xml(filepath)
                for conversation in extract_data_from_xml(xml_object):
                    if self.use_history:
                        data.add(conversation_to_fb_format(conversation))
                    else:
                        data.add(conversation_to_basic_format(conversation))
            except ET.ParseError:
                # TODO: We possibly can log these errors,
                # but I'm not sure how it would intervene with the PrograssLogger
                pass
            except Exception:
                print(
                    'Unexpected error for file %s:\n%s' % (filepath, sys.exc_info()[0]),
                    file=sys.stderr,
                )
                raise
        # Join all unique conversations; trailing newline only when non-empty.
        data_str = '\n'.join(data) + ('\n' if len(data) > 0 else '')
        return data_str
def create_fb_format(inpath, outpath, use_history):
    """Convert all subtitle XML under ``inpath`` into fbformat train/valid/
    test files in ``outpath``.

    Movies are split round-robin by index: 10% test, 10% valid, 80% train.
    """
    print('[building fbformat]')
    start_time = time.time()
    ftrain = open(os.path.join(outpath, 'train.txt'), 'w')
    fvalid = open(os.path.join(outpath, 'valid.txt'), 'w')
    ftest = open(os.path.join(outpath, 'test.txt'), 'w')

    movie_dirs = get_list_of_files(inpath)
    total_movie_dirs = len(movie_dirs)
    total_files = sum([len(l) for l in movie_dirs.values()])
    print(
        '[Found %d movie folders and %d subtitles within %s in %d seconds]'
        % (total_movie_dirs, total_files, inpath, time.time() - start_time)
    )
    # Sanity checks against the known OpenSubtitles2018 corpus layout.
    assert total_movie_dirs == NUM_MOVIE_FOLDERS, 'Incorrect number of movies'
    assert total_files == NUM_SUBTITLES_FILES, 'Incorrect number of files'

    # One DataProcessor task per movie; imap preserves ordering so the
    # round-robin split below is deterministic.
    processor = DataProcessor(use_history)
    with multiprocessing.Pool(processes=os.cpu_count()) as pool:
        for i, s in enumerate(pool.imap(processor, tqdm.tqdm(movie_dirs.items()))):
            handle = ftrain
            # TODO: Shall we use smaller valid/test sets? Even 10% is A LOT here
            if i % 10 == 0:
                handle = ftest
            if i % 10 == 1:
                handle = fvalid
            handle.write(s)

    ftrain.close()
    fvalid.close()
    ftest.close()
    print(
        '[Data has been successfully extracted in %d seconds]'
        % (time.time() - start_time,)
    )
def build(datapath, use_history):
    """Download (if necessary) the OpenSubtitles2018 English corpus and build
    the fbformat dataset; returns the build directory path."""
    dpath = os.path.join(datapath, 'OpenSubtitles2018')
    if not use_history:
        dpath += '_no_history'
    version = '1'

    if not build_data.built(dpath, version_string=version):
        print('[building data: ' + dpath + ']')
        if build_data.built(dpath):
            # An older version exists, so remove these outdated files.
            build_data.remove_dir(dpath)
        build_data.make_dir(dpath)

        untar_path = os.path.join(dpath, 'OpenSubtitles', 'xml', 'en')
        # Only re-download when the extracted corpus is missing/incomplete.
        if len(glob.glob(untar_path + '/*/*/*.xml')) != NUM_SUBTITLES_FILES:
            # Download the data.
            url = 'https://object.pouta.csc.fi/OPUS-OpenSubtitles/v2018/xml/en.zip'
            build_data.download(url, dpath, 'OpenSubtitles2018.zip')
            build_data.untar(dpath, 'OpenSubtitles2018.zip')

        create_fb_format(untar_path, dpath, use_history)

        # Mark the data as built.
        build_data.mark_done(dpath, version_string=version)
    return dpath
| [
"[email protected]"
] | |
efdfe5a0a9fd4511946056b84590b1ff8569b14c | 4e7669f4234dbbcc6ef8206ac43bba33c53b8d1e | /Predictions/DataProcessing.py | 73ecbdb3270823e4d440ea305f64b9d0f26fce93 | [] | no_license | chouhansolo/edbn | 46cadbcb8d4e079cee746868663379b5b825286b | 63cfcd7e5e5e17242aed3b1968119e85b2796015 | refs/heads/master | 2023-04-05T06:54:26.380161 | 2021-04-21T08:50:30 | 2021-04-21T08:50:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,175 | py | """
Author: Stephen Pauwels
"""
import os
import pickle
import pandas as pd
from RelatedMethods.Camargo.support_modules.role_discovery import role_discovery
from Utils.LogFile import LogFile
BPIC15 = "BPIC15"
BPIC15_1 = "BPIC15_1"
BPIC15_2 = "BPIC15_2"
BPIC15_3 = "BPIC15_3"
BPIC15_4 = "BPIC15_4"
BPIC15_5 = "BPIC15_5"
BPIC12 = "BPIC12"
BPIC12W = "BPIC12W"
HELPDESK = "HELPDESK"
BPIC18 = "BPIC18"
LOGFILE_PATH = "../Data/Logfiles"
def preprocess(logfile, add_end, reduce_tasks, resource_pools, resource_attr, remove_resource):
    """Apply the configured preprocessing steps to ``logfile`` in place.

    Steps (each controlled by a flag): role discovery over the resource
    attribute, adding artificial start/end events per case, and collapsing
    consecutive duplicate (activity, resource) events. Finishes by
    converting all attributes to integers. Returns the same logfile.
    """
    # Discover Roles
    if resource_pools and resource_attr is not None:
        resources, resource_table = role_discovery(logfile.get_data(), resource_attr, 0.5)
        log_df_resources = pd.DataFrame.from_records(resource_table)
        log_df_resources = log_df_resources.rename(index=str, columns={"resource": resource_attr})
        print(logfile.data)
        # Attach the discovered "role" column to every event.
        logfile.data = logfile.data.merge(log_df_resources, on=resource_attr, how='left')
        logfile.categoricalAttributes.add("role")
        if remove_resource:
            logfile.data = logfile.data.drop([resource_attr], axis=1)
        # Downstream steps operate on the discovered role column.
        # NOTE(review): exact nesting of this assignment relative to the
        # remove_resource branch was reconstructed — confirm against VCS.
        resource_attr = "role"
    else:
        # No role discovery: treat the raw resource column as the role.
        logfile.data = logfile.data.rename(columns={resource_attr: "role"})
        logfile.categoricalAttributes.add("role")
    print(logfile.data)

    if add_end:
        # Wrap every case with artificial "start" and "end" events.
        cases = logfile.get_cases()
        new_data = []
        for case_name, case in cases:
            record = {}
            for col in logfile.data:
                if col == logfile.trace:
                    record[col] = case_name
                else:
                    record[col] = "start"
            new_data.append(record)
            for i in range(0, len(case)):
                new_data.append(case.iloc[i].to_dict())
            record = {}
            for col in logfile.data:
                if col == logfile.trace:
                    record[col] = case_name
                else:
                    record[col] = "end"
            new_data.append(record)
        logfile.data = pd.DataFrame.from_records(new_data)

    # Check for dublicate events with same resource
    if reduce_tasks and resource_attr is not None:
        cases = logfile.get_cases()
        reduced = []
        for case_name, case in cases:
            # Always keep the first event; drop immediate repetitions of
            # the same (activity, resource) pair.
            reduced.append(case.iloc[0].to_dict())
            current_trace = [case.iloc[0][[logfile.activity, resource_attr]].values]
            for i in range(1, len(case)):
                if case.iloc[i][logfile.activity] == current_trace[-1][0] and \
                        case.iloc[i][resource_attr] == current_trace[-1][1]:
                    pass
                else:
                    current_trace.append(case.iloc[i][[logfile.activity, resource_attr]].values)
                    reduced.append(case.iloc[i].to_dict())
        logfile.data = pd.DataFrame.from_records(reduced)
        print("Removed duplicated events")

    logfile.convert2int()

    return logfile
def get_data(dataset, dataset_size, k, add_end, reduce_tasks, resource_pools, remove_resource):
    """Load, preprocess and cache the requested event log.

    Args:
        dataset: one of the dataset name constants (BPIC12, HELPDESK, ...).
        dataset_size: maximum number of rows to read from the CSV.
        k: history window size used to build the k-context.
        add_end, reduce_tasks, resource_pools, remove_resource:
            preprocessing flags, forwarded to ``preprocess``.

    Returns:
        A (preprocessed LogFile, cache-file base name) tuple, or None when
        the dataset name is unknown (matching the original behavior).
    """
    filename_parts = [dataset, str(dataset_size), str(k)]
    # Encode each boolean flag as "1"/"0" in the cache-file name.
    filename_parts.extend("1" if flag else "0"
                          for flag in (add_end, reduce_tasks,
                                       resource_pools, remove_resource))
    print(filename_parts)
    cache_file = LOGFILE_PATH + "/" + "_".join(filename_parts)

    if os.path.exists(cache_file):
        print("Loading file from cache")
        with open(cache_file, "rb") as pickle_file:
            preprocessed_log = pickle.load(pickle_file)
    else:
        # Per-dataset configuration, replacing the previous copy-pasted
        # if/elif chain:
        # (csv path, time attr, case attr, activity attr, resource attr,
        #  columns to keep, minimal case length or None).
        configs = {
            # BPIC15 (without suffix) historically maps to the first log.
            BPIC15: ("../Data/BPIC15_1_sorted_new.csv", "Complete Timestamp",
                     "Case ID", "Activity", "Resource",
                     ["Case ID", "Activity", "Resource"], 5),
            BPIC12: ("../Data/BPIC12.csv", "completeTime", "case", "event",
                     "org:resource", ["case", "event", "org:resource"], 5),
            BPIC12W: ("../Data/BPIC12W.csv", "completeTime", "case", "event",
                      "org:resource", ["case", "event", "org:resource"], 5),
            HELPDESK: ("../Data/Helpdesk.csv", "completeTime", "case", "event",
                       "Resource", ["case", "event", "Resource"], 3),
            # BPIC18 keeps no resource attribute and no length filter.
            BPIC18: ("../Data/BPIC18.csv", "startTime", "case", "event",
                     None, ["case", "event", "subprocess"], None),
        }
        # The five municipality logs share the same layout.
        for name in (BPIC15_1, BPIC15_2, BPIC15_3, BPIC15_4, BPIC15_5):
            configs[name] = ("../Data/%s_sorted_new.csv" % name,
                            "Complete Timestamp", "Case ID", "Activity",
                            "Resource", ["Case ID", "Activity", "Resource"], 5)

        if dataset not in configs:
            print("Unknown Dataset")
            return None

        (path, time_attr, case_attr, activity_attr, resource_attr,
         columns, min_case_length) = configs[dataset]
        logfile = LogFile(path, ",", 0, dataset_size, time_attr, case_attr,
                          activity_attr=activity_attr, convert=False, k=k)
        logfile.keep_attributes(columns)
        if min_case_length is not None:
            logfile.filter_case_length(min_case_length)

        preprocessed_log = preprocess(logfile, add_end, reduce_tasks,
                                      resource_pools, resource_attr,
                                      remove_resource)
        preprocessed_log.create_k_context()
        with open(cache_file, "wb") as pickle_file:
            pickle.dump(preprocessed_log, pickle_file)

    return preprocessed_log, "_".join(filename_parts)
def calc_charact():
    """Print summary statistics (events, cases, activities, case lengths)
    for every configured benchmark dataset."""
    import numpy as np

    print("Calculating characteristics")
    datasets = [BPIC12, BPIC12W, BPIC15_1, BPIC15_2, BPIC15_3, BPIC15_4, BPIC15_5, HELPDESK]
    for dataset in datasets:
        # Huge dataset_size to load the full log; k=0, remove_resource=True.
        logfile, name = get_data(dataset, 20000000, 0, False, False, False, True)
        cases = logfile.get_cases()
        case_lengths = [len(c[1]) for c in cases]
        print("Logfile:", name)
        print("Num events:", len(logfile.get_data()))
        print("Num cases:", len(cases))
        print("Num activities:", len(logfile.get_data()[logfile.activity].unique()))
        print("Avg activities in case:", np.average(case_lengths))
        print("Max activities in case:", max(case_lengths))
        print()
if __name__ == "__main__":
calc_charact()
| [
"[email protected]"
] | |
240b97aea52be8a26c3a5cf1be0c510ebeff50e0 | bff37773d1e6c3f4bf8ae4eaa64d7a2d563ecf68 | /backend/users/migrations/0002_auto_20201217_0711.py | 46bfcb7d1b2be14df2fe21ac2b64e683539ccceb | [] | no_license | crowdbotics-apps/mobile-17-dec-dev-16856 | d405478f85248047e00ed97cd4b61fa5ca2a8fd6 | b5c60c39b4e6715b17fa1e7dff6c72527f6ae967 | refs/heads/master | 2023-02-03T00:16:51.489994 | 2020-12-17T14:12:46 | 2020-12-17T14:12:46 | 322,203,166 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | # Generated by Django 2.2.17 on 2020-12-17 07:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax ``users.User.name``: the CharField(max_length=255) becomes
    nullable and blankable."""

    dependencies = [
        ("users", "0001_initial"),
    ]

    operations = [
        migrations.AlterField(
            model_name="user",
            name="name",
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
| [
"[email protected]"
] | |
2c705f84317dd7fb1c449692c21c19157c862a5f | e87d793b3a5facc6e54e0263fbd67703e1fbb382 | /duckietown-world-venv/bin/jupyter-trust | b408b0d21630fc79d1e9443daa6b2a05bc45d37c | [] | no_license | llingg/behaviour-benchmarking | a860bbe709309e13f3e1133d916944882199a40f | 85bbf1a9c2c628ba74480fe7abac3804d6afdac4 | refs/heads/v1 | 2022-10-06T08:21:29.068329 | 2020-06-11T07:02:46 | 2020-06-11T07:02:46 | 259,622,704 | 0 | 0 | null | 2020-06-02T17:52:46 | 2020-04-28T11:52:08 | C++ | UTF-8 | Python | false | false | 302 | #!/home/linuslingg/duckietown-world/duckietown-world-venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from nbformat.sign import TrustNotebookApp
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix so argv[0] shows the
    # plain command name, then hand control to nbformat's trust CLI app.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(TrustNotebookApp.launch_instance())
| [
"[email protected]"
] | ||
693351d13cbca26f5dc2f674b07e879c28cc09eb | b3a55844de9ff46972448b56ccadc1e3088adae1 | /poptimizer/data/views/go.py | 8e124ea774a85d74b096f602bcad3c5d32f544ed | [
"Unlicense"
] | permissive | tjlee/poptimizer | 480a155e2f4ffd5d6eda27323c5baa682d7f9f00 | 3a67544fd4c1bce39d67523799b76c9adfd03969 | refs/heads/master | 2023-08-15T10:16:11.161702 | 2021-10-15T15:35:38 | 2021-10-15T15:35:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 583 | py | """Предварительная версия интеграции с Go."""
import aiohttp
from bson import json_util
from poptimizer.shared import connections
async def rest_reader(session: aiohttp.ClientSession = connections.HTTP_SESSION):
    """Fetch trading dates from the local Go REST service and decode the
    MongoDB extended-JSON payload.

    Raises an aiohttp error on non-2xx responses.

    NOTE(review): the default session is bound once at import time
    (default arguments are evaluated at function definition) — confirm
    that is intended.
    """
    async with session.get("http://localhost:3000/trading_dates/trading_dates") as respond:
        respond.raise_for_status()
        json = await respond.text()
        return json_util.loads(json)


if __name__ == "__main__":
    import asyncio

    loop = asyncio.get_event_loop()
    print(loop.run_until_complete(rest_reader()))
| [
"[email protected]"
] | |
f611a878a16540a8544d96b179da3dbe91d2edf7 | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /MY_REPOS/INTERVIEW-PREP-COMPLETE/notes-n-resources/Data-Structures-N-Algo/_DS-n-Algos/_Another-One/Project Euler/Problem 04/sol1.py | ba8a39290c9cd8d45a5050c08b2e276e81e6c6f9 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 872 | py | """
Problem:
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 x 99.
Find the largest palindrome made from the product of two 3-digit numbers which is less than N.
"""
from __future__ import print_function
def largest_palindrome(limit):
    """Return the largest palindrome below ``limit`` that is the product of
    two 3-digit numbers, or None when no such number above 10000 exists.

    Mirrors the original scan: walk downwards from ``limit - 1`` and, for
    each palindromic candidate, look for a 3-digit divisor whose cofactor
    is also 3 digits.
    """
    for number in range(limit - 1, 10000, -1):
        text = str(number)
        # Only palindromes need the (comparatively expensive) divisor scan.
        if text != text[::-1]:
            continue
        for divisor in range(999, 99, -1):
            quotient, remainder = divmod(number, divisor)
            # Use integer division: the original ``number / divisor`` check
            # relied on Python 2 semantics — under Python 3 the float
            # quotient's string (e.g. "913.0") never has length 3, so no
            # answer was ever found.
            if remainder == 0 and 100 <= quotient <= 999:
                return number
    return None


if __name__ == "__main__":
    # Same CLI behavior as before: prompt for a limit and print the answer
    # (nothing is printed when no palindrome qualifies).
    result = largest_palindrome(int(input("limit? ")))
    if result is not None:
        print(result)
| [
"[email protected]"
] | |
665db8b66745cfd5b563df3b665ec192d4fb6d31 | 3513dda3d40c26998288c49daca62f185d70ff84 | /mutations.py | 02525937d356e434f9c3c7d7dd00331bbc3a4232 | [] | no_license | Nan-Do/dag_generator | a6b992d538a94a8ca805aab40dc3053e52d3cd7e | 522ba85c67b2b433063f17169694f21203fc530c | refs/heads/master | 2021-05-02T02:13:22.632888 | 2018-03-01T01:25:06 | 2018-03-01T01:25:06 | 120,881,289 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,986 | py | from itertools import chain
from random import shuffle, choice, randint
from string import ascii_lowercase, ascii_uppercase, digits
from graph import Position, GraphLink
from utils import DEBUG
class MutateGraph:
"""
This class performs mutations to a graph
"""
    def __generate_file_name(self):
        """
        Generate a file name using the data of the graph being mutated
        Auxiliary function
        """
        # Join the output directory and a graph-id-based name, ensuring
        # exactly one '/' separator between them.
        file_name = self.graph.output_directory
        if file_name[-1] != '/':
            file_name += '/'
        file_name += 'graph-' + self.graph.id
        return file_name
    def __compute_graph_nodes(self, graph):
        """Return the set of node labels appearing as an endpoint of at
        least one link of ``graph``.

        NOTE(review): labels are resolved through ``self.graph.treelevels``
        while links come from the ``graph`` argument — confirm the two are
        meant to share the same level structure.
        """
        nodes = set()
        t = self.graph.treelevels
        for link in graph.treelinks:
            level, block, position = link.orig
            nodes.add(t[level][block][position])
            level, block, position = link.dest
            nodes.add(t[level][block][position])
        return nodes
    def __mutation_string_generator(self):
        """
        Generate a string representation of the mutation opcodes.
        Auxiliary function
        """
        # Each mutation is a tuple whose first element is the opcode.
        for mutation in self.mutations:
            if mutation[0] == "DUPLICATE":
                # NOTE(review): mutation[0] is the opcode string itself, so
                # to_duplicate is always "DUPLICATE"; confirm whether
                # mutation[1]/mutation[2] were intended here.
                to_duplicate = mutation[0]
                to_remove = mutation[1]
                yield "Duplicating node: {} Removing: {}".format(to_duplicate,
                                                                 to_remove)
            elif mutation[0] == "ADD_NODE":
                block = mutation[1]
                node = mutation[2]
                position = mutation[3]
                yield "Adding node: {}, Block: {}, Position: {}".format(node,
                                                                        block,
                                                                        position)
            elif mutation[0] == "SWAP_NODES":
                source_node = mutation[1]
                dest_node = mutation[2]
                yield "Swapping nodes: {} with {}".format(source_node,
                                                          dest_node)
            elif mutation[0] == "RELABEL":
                node_to_be_changed = mutation[1]
                node_to_change_to = mutation[2]
                yield "Relabeling node: {}, {}".format(node_to_be_changed,
                                                       node_to_change_to)
            elif mutation[0] == "DELETE":
                orig_node = mutation[1]
                dest_node = mutation[2]
                yield "Removing link: {}, {}".format(orig_node,
                                                     dest_node)
            elif mutation[0] == "REORDER_PATH":
                nodes = mutation[1]
                reordered_branch = mutation[2]
                yield "Reordering path: {}, {}".format(nodes,
                                                       reordered_branch)
            elif mutation[0] == "REORDER_BLOCK":
                orig_block = mutation[1]
                ordered_block = mutation[2]
                yield "Reordering block: {}, {}".format(orig_block,
                                                        ordered_block)
            else:
                yield "UNKNOWN OPERATION: {}".format(mutation)
    def __compute_mutations_score(self):
        """
        Compute the expected score for the applied mutations.
        This function computes the expected result for the applied
        mutations. With the current scoring functions the score
        is computed in terms of the difference of number of nodes
        That means that the addition always adds one element and
        the deletion removes one if it deletes a node. This is
        not always warrantied as we are dealing with dags and
        there might be more than one way to reach a node.
        """
        # score = 0
        added_nodes = set()
        deleted_nodes = set()
        for m in self.mutations:
            if m[0] == 'ADD_NODE':
                # score += 1
                added_nodes.add(m[2])
            if m[0] == 'DELETE':
                dest_node = m[2]
                skip_mutation = False
                t = self.graph.treelevels
                # A DELETE only counts when the target node is no longer
                # reachable: if any remaining link still points at it, the
                # mutation is skipped.
                for link in self.graph.treelinks:
                    level, block, position = link.dest
                    if dest_node == t[level][block][position]:
                        skip_mutation = True
                        break
                if skip_mutation:
                    continue
                # score -= 1
                # Deleting a node a previous mutation added cancels out.
                if m[2] in added_nodes:
                    added_nodes.remove(dest_node)
                    continue
                deleted_nodes.add(dest_node)
        # return abs(len(added_nodes) - len(deleted_nodes))
        return abs(len(added_nodes) + len(deleted_nodes))
    def __get_nodes_to_add(self, new_identifiers):
        """
        Generate a list of nodes ordered randomly that are not present in the
        current graph.

        new_identifiers -> In case all the possible identifies are taken
                           specify how many need to be generated.
        """
        nodes = self.graph.nodes
        # Check which identifiers have been used
        nodes_to_add = set(chain.from_iterable([list(ascii_lowercase),
                                                list(ascii_uppercase),
                                                list(digits)]))
        # Keep only characters not already used as node labels.
        nodes_to_add.symmetric_difference_update(nodes)
        # In case there are no identifiers available generate new ones.
        # NOTE: xrange — this module targets Python 2.
        if len(nodes_to_add) == 0:
            last = max(nodes)
            nodes_to_add = set(xrange(last+1, last+1+new_identifiers))
        nodes_to_add = list(nodes_to_add)
        shuffle(nodes_to_add)
        return nodes_to_add
    def add_node(self, times):
        """
        Mutation that adds a node to the current graph

        times -> How many relabelings we must perform.
        """
        treelevels = self.graph.treelevels
        nodes_to_add = self.__get_nodes_to_add(times)

        for _ in xrange(times):
            node = nodes_to_add.pop()
            # Pick a random block at a random non-root level and a random
            # slot inside that block.
            level = randint(1, len(treelevels) - 1)
            block = randint(0, len(treelevels[level]) - 1)
            position = randint(0, len(treelevels[level][block]) - 1)

            if DEBUG:
                print " Adding node ", node, "to block",\
                    treelevels[level][block], "at position", position

            # Record the opcode before mutating the block (hence list copy).
            self.mutations.append(("ADD_NODE",
                                   list(treelevels[level][block]),
                                   node,
                                   position))
            treelevels[level][block].insert(position, node)
            self.graph.nodes += (node,)

            # Update treelinks
            # Add the new link
            # Links into the same block at positions >= the insertion point
            # must shift one slot to the right; the new node inherits the
            # father of the link it displaced.
            father = None
            link_index = 0
            new_treelinks = []
            for pos, link in enumerate(self.graph.treelinks):
                dest = link.dest
                if dest.level == level and dest.block == block:
                    if dest.position >= position:
                        father = link.orig
                        if dest.position == position:
                            link_index = pos
                        new_link = GraphLink(father,
                                             Position(level,
                                                      block,
                                                      dest.position + 1))
                        new_treelinks.append(new_link)
                        continue
                new_treelinks.append(link)

            new_link = GraphLink(father,
                                 Position(level,
                                          block,
                                          position))
            new_treelinks.insert(link_index, new_link)
            self.graph.treelinks = new_treelinks
def swap_nodes(self, times):
"""
Mutation that swaps two nodes from the current graph.
times -> How many swaps we must perform.
"""
nodes = list(self.graph.nodes)
shuffle(nodes)
treelevels = self.graph.treelevels
if DEBUG:
print "\nSwapping mutations:"
if times > (len(nodes) / 2):
print "Warning::Specfied more swappings than the highest " +\
"number possible for the current graph"
times = len(nodes) / 2
for x in xrange(times):
source_node = nodes[x]
dest_node = nodes[x]
self.mutations.append(("SWAP_NODES", source_node, dest_node))
if DEBUG:
print " Swapping nodes ", source_node, dest_node
for level in treelevels:
for block in level:
if source_node in block and dest_node in block:
a = block.index(source_node)
b = block.index(dest_node)
block[a], block[b] = block[b], block[a]
elif source_node in block:
index = block.index(source_node)
block[index] = dest_node
elif dest_node in block:
index = block.index(dest_node)
block[index] = source_node
def swap_links(self, times):
    """
    Mutation that swaps the two nodes that share a father-child relationship.

    times -> How many swaps we must perform.
    """
    # Visit the links in random order.  (Python 2: range returns a list,
    # which shuffle can mutate in place.)
    link_positions = range(0, len(self.graph.treelinks))
    shuffle(link_positions)
    if times > len(link_positions):
        print "Warning::Specifier a higher number than the " +\
            "maximum number of swappings"
        times = len(link_positions)
    for x in xrange(times):
        link_position = link_positions[x]
        orig, dest = self.graph.treelinks[link_position]
        # Resolve the (level, block, position) coordinates to node labels.
        source_node = self.graph.treelevels[orig.level][orig.block][orig.position]
        dest_node = self.graph.treelevels[dest.level][dest.block][dest.position]
        self.mutations.append(("SWAP_NODES", source_node, dest_node))
        if DEBUG:
            print " Swapping nodes ", source_node, dest_node
        # Exchange parent and child labels in place.
        orig_block = self.graph.treelevels[orig.level][orig.block]
        dest_block = self.graph.treelevels[dest.level][dest.block]
        orig_block[orig.position], dest_block[dest.position] =\
            dest_block[dest.position], orig_block[orig.position]
def relabel_node(self, times):
    """
    Mutation that relabels a node within the graph.

    times -> How many relabelings we must perform.

    The mutation occurs changing one of the node identifiers with an
    identifier that has not been used. If all the identifiers have been
    used new identifiers as numbers will be generated.
    """
    treelevels = self.graph.treelevels
    if DEBUG:
        print "\nRelabeling mutations:"
    if times > len(self.graph.nodes):
        print 'Warning::Requesting more changes than nodes the graph ' +\
            'contains'
        times = len(self.graph.nodes)
    # Fresh identifiers to relabel with, and a random order of targets.
    nodes_to_add = self.__get_nodes_to_add(times)
    nodes_to_be_changed = list(self.graph.nodes)
    shuffle(nodes_to_be_changed)
    # Perform the relabelings
    for x in xrange(times):
        node_to_be_changed = nodes_to_be_changed[x]
        node_to_change_to = nodes_to_add[x]
        self.mutations.append(("RELABEL",
                               node_to_be_changed,
                               node_to_change_to))
        if DEBUG:
            print "Changing node:", node_to_be_changed,\
                "for node", node_to_change_to
        # Replace every occurrence of the old label across all levels.
        for level in treelevels:
            for block in level:
                if node_to_be_changed in block:
                    index = block.index(node_to_be_changed)
                    block[index] = node_to_change_to
def delete_path(self, times, start_from_root=False):
    """
    Mutation that deletes a path on the graph.

    times -> How many links to remove (each removed link is one
             "DELETE" mutation).
    start_from_root -> Does the path need to start from the root node?
    """
    treelevels = self.graph.treelevels
    treelinks = self.graph.treelinks
    if not treelinks:
        print "Warning::No more branchs to delete"
        return
    if times > len(treelinks):
        print "Warning::Specified to remove more links than the ones that are available"
        times = len(treelinks)
    # Seed the deletion frontier with a random link (optionally one
    # leaving the root at Position(0, 0, 0)).
    orig_link = choice(treelinks)
    if start_from_root:
        root = Position(0, 0, 0)
        orig_link = choice(filter(lambda x: x.orig == root,
                                  treelinks))
    frontier = [orig_link]
    if DEBUG:
        print "Removing branch:"
    # NOTE(review): the inner loop drains the whole frontier before the
    # outer `times > 0` check runs again, so slightly more than `times`
    # links may end up removed -- confirm this is intended.
    while times > 0:
        if len(treelinks) == 1:
            print "Warning::The graph contains only link aborting " +\
                "the deleteion"
            return
        if not frontier:
            frontier = [choice(treelinks)]
        while frontier:
            link = frontier.pop()
            treelinks.remove(link)
            orig = link.orig
            dest = link.dest
            orig_node = treelevels[orig.level][orig.block][orig.position]
            dest_node = treelevels[dest.level][dest.block][dest.position]
            times -= 1
            self.mutations.append(("DELETE", orig_node, dest_node))
            if DEBUG:
                print "Removing link from node ", orig_node, "to", dest_node
            # There is still a path that can reach the current dest node
            # no need to remove its descecndants
            if filter(lambda x: x.dest == dest, treelinks):
                continue
            # Get all the links that start on the dest node
            links = filter(lambda x: x.orig == dest, treelinks)
            frontier.extend(links)
def reorder_path(self, start_from_root=True):
    """
    Mutation that reorders a path on the graph.

    start_from_root -> Does the path need to start from the root node?

    A random walk following one outgoing link at a time is collected and
    its node labels are written back, shuffled, onto the same positions.
    """
    treelevels = self.graph.treelevels
    treelinks = self.graph.treelinks
    orig_link = choice(treelinks)
    if start_from_root:
        root = Position(0, 0, 0)
        orig_link = choice(filter(lambda x: x.orig == root,
                                  treelinks))
    # Label of the walk's starting node.
    orig_node = treelevels[orig_link.orig.level]\
        [orig_link.orig.block]\
        [orig_link.orig.position]
    nodes = []
    nodes.append(orig_node)
    positions = [orig_link.orig]
    if DEBUG:
        print "Reordering a path:"
    # Extend the walk by repeatedly picking one random outgoing link of
    # the node just reached; stops when a node has no outgoing links.
    frontier = [orig_link]
    while frontier:
        link = frontier.pop()
        dest = link.dest
        dest_node = treelevels[dest.level][dest.block][dest.position]
        nodes.append(dest_node)
        positions.append(dest)
        # Get all the links that start on the dest node
        links = filter(lambda x: x.orig == dest, treelinks)
        if links:
            link = choice(links)
            frontier.append(link)
    reordered_branch = list(nodes)
    shuffle(reordered_branch)
    self.mutations.append(('REORDER_PATH',
                           list(nodes),
                           reordered_branch))
    if DEBUG:
        print "Reordering path:", nodes, "to", reordered_branch
    # Write the shuffled labels back onto the walk's positions.
    for node, p in zip(reordered_branch, positions):
        level, block, position = p
        treelevels[level][block][position] = node
def reorder_block(self, times):
    """
    Mutation that reorders the children of a node.

    times -> How many blocks we have to reorder.
    """
    treelevels = self.graph.treelevels
    for _ in xrange(times):
        # Pick a random block of a random non-root level and shuffle it.
        level = randint(1, len(treelevels) - 1)
        block = randint(0, len(treelevels[level]) - 1)
        orig_block = list(treelevels[level][block])
        shuffle(treelevels[level][block])
        self.mutations.append(('REORDER_BLOCK',
                               orig_block,
                               list(treelevels[level][block])))
        if DEBUG:
            print "Reordering block", orig_block, "reordered into", treelevels[level][block]
def redundancy(self, times):
"""
Mutation that relabels the identifier of a node with another existing
identifier of the graph.
times -> How many nodes do we have to copy.
"""
treelevels = self.graph.treelevels
for _ in xrange(times):
nodes = chain.from_iterable(chain.from_iterable(treelevels))
shuffle(nodes)
to_duplicate = nodes[0]
to_remove = nodes[1]
self.mutations.append(("DUPLICATE", to_duplicate, to_remove))
if DEBUG:
print "Duplicating node:", to_duplicate, "Removing:", to_remove
if len(to_duplicate) == 1:
to_duplicate += '1'
for level in treelevels:
for block in level:
if to_remove in block:
index = block.index(to_remove)
block[index] = to_duplicate
def print_mutations_summary(self):
    """
    Show a summary of the applied mutations.

    Prints one indented line per mutation followed by the overall score.
    """
    SPACES = ' ' * 3
    print "Mutations for graph " + self.graph.id + ":"
    for s in self.__mutation_string_generator():
        print SPACES + s
    print
    print SPACES + "Score:", str(self.__compute_mutations_score())
def store_mutations_summary_to_file(self):
    """
    Write the summary of the generated mutations into a file.

    The file is named '<generated name>-mutations.txt' and contains one
    mutation description per line, followed by a final "Score: N" line.
    """
    out_path = self.__generate_file_name() + '-mutations.txt'
    with open(out_path, 'w') as out:
        for line in self.__mutation_string_generator():
            out.write(line + '\n')
        out.write("Score: " + str(self.__compute_mutations_score()) + '\n')
def store_mutation_opcodes_to_file(self, field_separator=' '):
    """
    Store the opcodes for the generated mutations in a text file.

    field_separator -> separator placed between the opcode and each of
    its operands.
    """
    def render(operand):
        # Lists are rendered without spaces, quoting string elements.
        # (string.translate could achieve the same effect but is less
        # portable.)
        if isinstance(operand, list):
            items = ("'" + item + "'" if isinstance(item, str) else str(item)
                     for item in operand)
            return '[' + ','.join(items) + ']'
        return str(operand)

    out_path = self.__generate_file_name() + '-opcodes.txt'
    with open(out_path, 'w') as out:
        for mutation in self.mutations:
            rendered = [render(op) for op in mutation[1:]]
            out.write(mutation[0] + field_separator +
                      field_separator.join(rendered) + "\n")
def __init__(self, graph):
    """graph -> the graph object this mutator operates on (marked mutated)."""
    # Log of (opcode, *operands) tuples describing each applied mutation.
    self.mutations = []
    self.graph = graph
    # Flag the graph so downstream consumers know it has been mutated.
    self.graph.mutated = True
| [
"[email protected]"
] | |
f1504401ecfae9c68665a07df227490f7bdde2e6 | 4f3a4c194451eae32f1ff7cf3b0db947e3892365 | /39/main.py | 380f721980637a5dbb3d095e6966d349ecfd7c39 | [] | no_license | szhongren/leetcode | 84dd848edbfd728b344927f4f3c376b89b6a81f4 | 8cda0518440488992d7e2c70cb8555ec7b34083f | refs/heads/master | 2021-12-01T01:34:54.639508 | 2021-11-30T05:54:45 | 2021-11-30T05:54:45 | 83,624,410 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,699 | py | """
Given a set of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sum to T.
The same repeated number may be chosen from C unlimited number of times.
Note:
All numbers (including target) will be positive integers.
The solution set must not contain duplicate combinations.
For example, given candidate set [2, 3, 6, 7] and target 7,
A solution set is:
[
[7],
[2, 2, 3]
]
"""
class Solution(object):
    """LeetCode 39 -- Combination Sum.

    Find all unique combinations of ``candidates`` (each value may be
    reused an unlimited number of times) whose sum equals ``target``.
    """

    def combinationSum(self, candidates, target):
        """
        :type candidates: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        self.res = []
        # Sorting lets the recursion prune as soon as a candidate
        # exceeds the remaining target.
        self.combinationSumRecur(sorted(candidates), target, 0, [])
        return self.res

    def combinationSumRecur(self, candidates, target, start, curr_set):
        """Depth-first search over candidates[start:].

        curr_set is the combination built so far; when the remaining
        target reaches 0 it is recorded.  Recursing with index ``i``
        (not 0) keeps combinations unique while still allowing the same
        candidate to be reused.
        """
        if target == 0:
            self.res.append(curr_set)
            return
        for i in range(start, len(candidates)):
            if candidates[i] > target:
                # candidates is sorted, so no later value can fit either.
                return
            self.combinationSumRecur(candidates, target - candidates[i],
                                     i, curr_set + [candidates[i]])
# for each val in candidates, get target - val, then see if that is in candidates
# if yes, add current set of vals to self.res
# recur on target - val
ans = Solution()
print(ans.combinationSum([2, 3, 6, 7], 7))
# NOTE(review): the second case (28 candidates, target 310) explores a much
# larger search space and may take noticeably longer to run.
print(ans.combinationSum([92,71,89,74,102,91,70,119,86,116,114,106,80,81,115,99,117,93,76,77,111,110,75,104,95,112,94,73], 310))
"[email protected]"
] | |
482d241112ea052ce15aca3724fab31234ee9eaf | 18ab6f3ac3458db61f506bee8885c70d6de6c06e | /class_12/userhandling/accounts/models.py | 69c6252de80e13838a61f0d57c38f6c4fdd2727d | [] | no_license | coding-blocks-archives/Django2017Spring | 8ca7a14e2d867cb07a60d2dca1c9138cada6c06a | 008c32bc725918e93a0020b39e226c634b6f2e0f | refs/heads/master | 2021-06-14T15:19:40.830677 | 2017-04-16T11:22:04 | 2017-04-16T11:22:04 | 79,050,330 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 966 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django import forms
# Create your models here.
class MyUser(models.Model):
    """Profile data attached to Django's built-in auth User.

    NOTE(review): ForeignKey(User) without an on_delete argument only
    works on Django < 2.0; newer versions require it -- confirm the
    project's Django version.
    """
    # Owning auth user (not enforced as one-to-one here).
    user = models.ForeignKey(User)
    name = models.CharField(max_length=50, default='')
    address = models.CharField(max_length=300, default='')
    contact = models.CharField(max_length=12, null=True)

    def __unicode__(self):
        # Python 2 string representation: show the owning username.
        return self.user.username
class RegisterForm(forms.Form):
    """Sign-up form: account credentials plus profile fields."""
    name = forms.CharField(max_length=50, label='Your Name')
    username = forms.CharField(max_length=20, label='Username')
    # PasswordInput renders a masked <input type="password"> field.
    password = forms.CharField(widget=forms.PasswordInput(), label='Password')
    address = forms.CharField(max_length=200, label='Your Address')
    contact = forms.CharField(max_length=12, label='You Contact')
class LoginForm(forms.Form):
    """Login form: username plus masked password."""
    username = forms.CharField(max_length=20, label='Username')
    password = forms.CharField(widget=forms.PasswordInput(), label='Password')
"[email protected]"
] | |
d3cdf6d0c1f087462067307c535a1697419582a0 | 6f50200dd3b99ba36e5e2902c477f80535e85c95 | /Lecture/Lecture17-Heaps/std/exceptions.py | 47f1908dabe31c08ae6582898dcafb23c66f4bd8 | [
"MIT"
] | permissive | tonysulfaro/CSE-331 | 8d767f0e381d613593016d06e0222cb6dca00ab6 | b4f743b1127ebe531ba8417420d043e9c149135a | refs/heads/master | 2021-06-25T18:36:45.122524 | 2020-10-19T05:06:22 | 2020-10-19T05:06:22 | 144,987,165 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | py | class Empty(Exception):
"""Error attempting to access an element from an empty container."""
pass
| [
"[email protected]"
] | |
4255b09ee52495dfc8984febfc0cf9cfe0f5ca64 | d86a7fcc543ab6066ca772f67551943ec4cad31a | /perf/metrics/aggregator.py | 4d53cd7d3a0a44abc20d2129109d660a3f90cf05 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | fossabot/lake | 184b4db5a14725da919093ef0cb392c329166b89 | 75f2bf10ef50bb4979e52a7ce539ea5de00d3647 | refs/heads/master | 2022-12-11T10:00:46.239848 | 2020-09-16T05:04:37 | 2020-09-16T05:04:37 | 295,928,404 | 0 | 0 | Apache-2.0 | 2020-09-16T05:04:36 | 2020-09-16T05:04:35 | null | UTF-8 | Python | false | false | 1,189 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import json
import time
import os
import threading
class MetricsAggregator(threading.Thread):
    """Background thread that samples a metrics JSON file about once a second.

    Whenever the triple (messageIngress, messageEgress, memoryAllocated)
    read from the file changes, the new "i/e/m" string is recorded in an
    in-memory store keyed by the current timestamp in milliseconds.
    """

    def __init__(self, path):
        """path -> location of the JSON metrics file to watch."""
        super().__init__()
        self._stop_event = threading.Event()
        self.__store = dict()      # timestamp(ms, str) -> "ingress/egress/memory"
        self.__path = path
        self.__last_value = None   # last recorded triple, for de-duplication

    def stop(self) -> None:
        """Signal the polling loop to finish, wait for it, take a final sample."""
        self._stop_event.set()
        self.join()
        time.sleep(0.5)
        self.__process_change()

    def __process_change(self) -> None:
        """Read the metrics file and record the triple if it changed."""
        if not os.path.isfile(self.__path):
            return
        try:
            with open(self.__path, mode='r', encoding='ascii') as fd:
                data = json.load(fd)
            i, e, m = (data['messageIngress'], data['messageEgress'],
                       data['memoryAllocated'])
            value = '{}/{}/{}'.format(i, e, m)
            if value != self.__last_value:
                self.__store[str(int(time.time()*1000))] = value
                self.__last_value = value
        except (OSError, ValueError, KeyError, TypeError):
            # Best effort: the file may be mid-write (invalid JSON raises
            # JSONDecodeError, a ValueError), missing keys, or unreadable.
            # Skip this sample; the next tick retries.  Narrowed from the
            # original bare `except:` so real bugs are no longer hidden.
            pass

    def get_metrics(self) -> dict:
        """Return the collected {timestamp_ms: "i/e/m"} samples."""
        return self.__store

    def run(self) -> None:
        """Poll the metrics file roughly once per second until stopped."""
        self.__process_change()
        while not self._stop_event.is_set():
            self.__process_change()
            time.sleep(1)
        self.__process_change()
| [
"[email protected]"
] | |
4e9776b12ce251408a9c5871641abe9f9225f6b2 | d79f3a31d173f18ec112c521acdcee8e8e73724d | /getid.py | 8a6fcb90734975a7c0dfc8ede803ef708a2c3468 | [] | no_license | k156/hello | 3de815de569b38f8260e774e57b138f4da43f480 | f5a7f386d3f78d15d7f166a95ad25724e168f472 | refs/heads/master | 2020-04-04T23:15:38.252126 | 2019-05-03T05:57:00 | 2019-05-03T05:57:00 | 156,352,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,268 | py | from time import sleep
from selenium import webdriver
from bs4 import BeautifulSoup
import requests
USER = ""
PASS = ""

browser = webdriver.Chrome()
browser.implicitly_wait(3)

# Open the login page.
url_login = "https://www.yes24.com/Templates/FTLogin.aspx?ReturnURL=http://ticket.yes24.com/Pages/Perf/Detail/Detail.aspx&&ReturnParams=IdPerf=30862"
browser.get(url_login)
print("로그인 페이지에 접근합니다.")

# Fill in the user id and password.
e = browser.find_element_by_id("SMemberID")
e.clear()
e.send_keys(USER)
e = browser.find_element_by_id("SMemberPassword")
e.clear()
e.send_keys(PASS)

# Submit the form to log in.
form = browser.find_element_by_css_selector("button#btnLogin").submit()
print("로그인 버튼을 클릭합니다.")

# Click the booking (reserve) button.
reserve_bt = browser.find_element_by_class_name("rbt_reserve").click()
print("예매 버튼을 클릭합니다.")

# Switch to the popup window.
browser.switch_to.window(browser.window_handles[1])

# Select the date (comment said the 26th, but the element id is 2019-01-17).
date_sel = browser.find_element_by_id("2019-01-17").click()
sleep(1)

# Click the 'select seats' button.
browser.find_element_by_css_selector("div.fr img").click()

# NOTE(review): `res` is never defined anywhere in this script, so this
# line raises NameError as written -- a `res = requests.get(...)` call
# appears to be missing.
soup = BeautifulSoup(res.text, 'html.parser')
print(soup)
"[email protected]"
] | |
c83d71dab57d4a80dd8b19afa5d84df75f156ca3 | 09e60fdb635f331c139d191a5c53244afb2302e7 | /_footer.py | 3dc47ab91d78dfc77868319be2897e3a7fc17ca7 | [
"LicenseRef-scancode-generic-exception",
"BSD-3-Clause",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | GrahamDumpleton-abandoned/ose | 5e88dce5883f2e648eac992f1034a7a78bb81189 | 7b1d1a5de4812cdd5daab40d3604a76474389d47 | refs/heads/master | 2021-01-05T11:17:53.152854 | 2012-12-23T10:03:53 | 2012-12-23T10:03:53 | 241,006,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 87 | py | def footer(req):
return "Copyright © Dumpleton Software Consulting Pty Limited"
| [
"devnull@localhost"
] | devnull@localhost |
b84527ea599f8959403e6dcbf05235b153ccf8a0 | 888eaf95c0ec15d5649697a29cfa6973cde91fda | /namebox/venv/bin/easy_install-3.6 | a8c62fbeed84e6cdb3a64e1ca58bf9fc55302953 | [] | no_license | mnojha/namegenerator | e153b169f42c418a0ef73b1efedf68755305a912 | b13a9ad50d49c0efdf5cdcd17af0462c716589f9 | refs/heads/master | 2020-04-02T13:55:13.050458 | 2018-10-29T13:08:12 | 2018-10-29T13:08:12 | 154,502,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 264 | 6 | #!/home/lap01/workspace/namebox/venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # Strip any '-script.py(w)' / '.exe' suffix from argv[0] so
    # setuptools' easy_install sees the expected program name, then
    # delegate to it and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"[email protected]"
] | |
dbb2d02825e29858e89048ca1518fffac13e6b25 | ba160f9c149a0898213426f5472896b5c65c85cd | /linearizeWtrack.py | 1a2061064465a8610e3a955f2b59e195c40b522a | [] | no_license | afcarl/MoG_tools | 652f9b69ec4ad8b6929c3e151a15ec3d752ae658 | f26efde88eff46a756bc534486ac96f0a1c68413 | refs/heads/master | 2020-03-18T07:20:15.235751 | 2017-09-22T14:32:33 | 2017-09-22T14:32:33 | 134,445,693 | 1 | 0 | null | 2018-05-22T16:44:05 | 2018-05-22T16:44:04 | null | UTF-8 | Python | false | false | 10,616 | py | import scipy.io as _sio
import pickle
#import ExperimentalMarkContainer as EMC
import filter as _flt
from os import listdir
from os.path import isdir, join
import EnDedirs as _edd
from filter import gauKer
import utilities as _U
# The animal tends to spend much of its time in arms 1, 3, 5
# At least for well-trained animals, animals also do not turn around in
# arms 1, 3, 5 very much. We use
# for bond day4, ex = 2, 4, 6
# for day3, ex
animals = [["bon", "bond", "Bon"], ["fra", "frank", "Fra"], ["gov", "GovernmentData", "Gov"]]
#basedir = "/Volumes/Seagate Expansion Drive"
#basedir = "/Volumes/ExtraDisk/LorenData"
#basedir = "/Users/arai/TEMP/LorenData"
basedir = "/Users/arai/usb/nctc/Workspace/EnDe/LORENDATA"
exf("linearize_funcs.py")
# home well, choice point left well, left corner right well, right corner
landmarks = _N.empty((6, 2))
Nsgs = 5
segs = _N.empty((Nsgs, 2, 2))
length = _N.empty(Nsgs)
offset = _N.array([0, 1, 2, 1, 2])
gkRWD = gauKer(5)
gkRWD /= _N.sum(gkRWD)
# regular lindist # 0 to 3
# lin_inout # inbound outbound 0 to 6
# lin_lr # -3 to 3
# lin_lr_inout # -3 to 3
ii = 0
anim1 = None
anim2 = None
day = None
ep = None
r = None
def onclick(event):
    """Matplotlib click callback: record the next landmark position.

    Stores the clicked (x, y) into the next row of `landmarks`; after
    the sixth landmark has been chosen, linearization starts via done().
    """
    global ix, iy, an, day, ep
    global ii
    ix, iy = event.xdata, event.ydata
    print 'x = %d, y = %d'%(
        ix, iy)
    global coords
    coords = [ix, iy]
    landmarks[ii, 0] = ix
    landmarks[ii, 1] = iy
    ii += 1
    if ii == 6:
        done()
# Module-level outputs filled in by done(); kept global so the matplotlib
# callback chain can write them and later inspection can read them.
seg_ts = None        # per-sample index of the closest track segment
inout = None         # inbound - outbound
a_inout = None       # inbound - outbound (float-valued counterpart)
lr = None            # left/right arm indicator
fspd = None
lindist = None       # linearization with no left-right
raw_lindist = None   # linearization with no left-right (before smoothing)
lin_lr = None
lin_inout= None
lin_lr_inout = None
scxMin = None        # plot/track bounds from get_boundaries()
scxMax = None
scyMin = None
scyMax = None
def done():
    """
    Come here after 6 landmarks chosen.

    Projects the smoothed 2-D positions onto the 5-segment W-track built
    from the landmarks, derives the linearized coordinates (lindist,
    left/right, inbound/outbound and the combined lin_lr_inout), writes
    them plus change-point files to disk, and renders diagnostic figures
    in windows of 1000 samples.
    """
    global r, seg_ts, segs, Nsgs, inout, a_inout, lindist, lin_lr, lin_inout, lin_lr_inout, lr, raw_lindist
    global scxMin, scxMax, scyMin, scyMax
    global an, day, ep
    hdir = _N.empty(2)
    vdir = _N.empty(2)
    linp = _N.empty(2)

    """
    Track topology (L* = clicked landmarks, digits = segments):
    L5      L0      L3
    ||      ||      ||
    ||      ||      ||
    5       1       3
    ||      ||      ||
    ||      ||      ||
    L4===4===L1===2===L2
    """
    scxMin, scxMax, scyMin, scyMax = get_boundaries(r)
    # Build the 5 track segments from the clicked landmarks.
    segs_from_landmarks(segs, landmarks, length)
    e = inout_dir(segs, Nsgs)
    a_s, b_s, c_s = slopes_of_segs(segs)

    # Draw the fitted segments on top of the position scatter plot.
    _plt.plot([segs[0, 0, 0], segs[0, 1, 0]], [segs[0, 0, 1], segs[0, 1, 1]], lw=3, color="black")
    _plt.plot([segs[1, 0, 0], segs[1, 1, 0]], [segs[1, 0, 1], segs[1, 1, 1]], lw=3, color="black")
    _plt.plot([segs[2, 0, 0], segs[2, 1, 0]], [segs[2, 0, 1], segs[2, 1, 1]], lw=3, color="black")
    _plt.plot([segs[3, 0, 0], segs[3, 1, 0]], [segs[3, 0, 1], segs[3, 1, 1]], lw=3, color="black")
    _plt.plot([segs[4, 0, 0], segs[4, 1, 0]], [segs[4, 0, 1], segs[4, 1, 1]], lw=3, color="black")

    segsr = segs.reshape((10, 2))
    clrs = ["blue", "orange", "red", "green", "yellow", "black", "brown"]
    fillin_unobsvd(r)

    N = r.shape[0]
    seg_ts = _N.empty(N, dtype=_N.int)
    lindist = _N.empty(N)
    lin_lr = _N.empty(N)
    lin_inout = _N.empty(N)
    lin_lr_inout = _N.empty(N)
    lr = _N.ones(N, dtype=_N.int) * -3
    inout = _N.empty(N, dtype=_N.int)
    a_inout = _N.empty(N)
    gk = gauKer(30)
    gk /= _N.sum(gk)
    # Smooth the head position (mean of the two tracked coordinate pairs
    # in columns 1..4 of r).
    fx = _N.convolve(0.5*(r[:, 1] + r[:, 3]), gk, mode="same")
    fy = _N.convolve(0.5*(r[:, 2] + r[:, 4]), gk, mode="same")
    xp = fx
    yp = fy
    xpyp = _N.empty((N, 2))
    xpyp[:, 0] = xp
    xpyp[:, 1] = yp

    # Distances from every sample to every segment endpoint.
    _xpyp = _N.repeat(xpyp, Nsgs*2, axis=0)
    rxpyp = _xpyp.reshape((N, Nsgs*2, 2))

    dv = segsr - rxpyp
    dists = _N.sum(dv*dv, axis=2) # closest point on maze from field points
    rdists = dists.reshape((N, Nsgs, 2))
    print rdists.shape

    online = _N.empty(Nsgs, dtype=bool)
    mins = _N.empty(Nsgs)

    for n in xrange(N):
        x0 = xpyp[n, 0]
        y0 = xpyp[n, 1]
        # xcs, ycs: pt on all line segs closest to x0, y0 (may b byond endpts)
        xcs = (b_s*(b_s*x0 - a_s*y0) - a_s*c_s) / (a_s*a_s + b_s*b_s)
        ycs = (-a_s*(b_s*x0 - a_s*y0) - b_s*c_s) / (a_s*a_s + b_s*b_s)
        find_clsest(n, x0, y0, segs, rdists, seg_ts, Nsgs, online, offset, xcs, ycs, mins, linp)

    # fig = _plt.figure()
    # _plt.plot(seg_ts)
    # clean_seg_ts(seg_ts)
    # _plt.plot(seg_ts)
    raw_lindist = _N.zeros(N)
    lindist_x0y0(N, xpyp, segs, rdists, seg_ts, Nsgs, online, offset, a_s, b_s, c_s, mins, linp, raw_lindist)
    smooth_lindist(raw_lindist, lindist)

    # fig = _plt.figure(figsize=(10, 4))
    # _plt.plot(lindist)
    # gk = gauKer(8) # don't want to make this too large. if we just pass through the choice point, we can miss it.
    # gk /= _N.sum(gk)
    # flindist = _N.convolve(lindist, gk, mode="same")
    # lindist = flindist
    # rm_lindist_jumps(N, lindist, seg_ts)
    fig = _plt.figure(figsize=(10, 4))
    _plt.plot(lindist)

    spd_thr = 0.35
    a_inout_x0y0(N, a_inout, inout, r, seg_ts, spd_thr, e)
    #_plt.plot([x0, x0], [y0, y0], ms=10, marker=".", color=clr)
    make_lin_inout(N, lindist, inout, lin_inout)
    make_lin_lr(N, lr, lindist, seg_ts, r)
    build_lin_lr_inout(N, lin_lr_inout, lindist, lr, inout, gkRWD)
    # inout
    cp_lr, cp_inout = cpify_LR_inout(lr, inout)

    # Persist results under linearize/<animal><day>0<epoch>/.
    sday = ("0%d" % day) if (day < 10) else ("%d" % day)
    fn = _edd.datFN("lindist.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2}, create=True)
    _N.savetxt(fn, lindist, fmt="%.3f")
    fn = _edd.datFN("cp_lr.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _U.savetxtWCom(fn, cp_lr, fmt="%d %d", com=("# N=%d. 1st column time, 2nd column - inout value from this time until time in next row" % N))
    fn = _edd.datFN("cp_inout.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _U.savetxtWCom(fn, cp_inout, fmt="%d %d", com=("# N=%d. 1st column time, 2nd column - inout value from this time until time in next row" % N))
    fn = _edd.datFN("lin_lr_inout.dat", dir="linearize/%(an)s%(dy)s0%(ep)d" % {"dy" : sday, "ep" : (ep+1), "an" : anim2})
    _N.savetxt(fn, lin_lr_inout, fmt="%.3f")

    """
    """
    # Render diagnostics in consecutive windows of 1000 samples.
    t0 = 0
    winsz = 1000
    t1 = 0
    iw = -1
    while t1 < N:
        iw += 1
        t0 = iw*winsz
        t1 = (iw+1)*winsz if (iw+1)*winsz < N else N-1
        #btwnfigs(anim2, day, ep, t0, t1, inout, [-1.1, 1.1], seg_ts+1, [0.9, 5.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
        btwnfigs(anim2, day, ep, t0, t1, inout, "INOUT", [-1.1, 1.1], lr, "LR", [-1.1, 1.1], lin_lr_inout, "lin_lr_inout", [-6.1, 6.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
        #btwnfigs(anim2, day, ep, t0, t1, lindist, [-0.1, 3.1], seg_ts+1, [0.9, 5.1], r, 1, 2, scxMin, scxMax, scyMin, scyMax)
############################################
# Main driver: for each animal/day/epoch, load the MATLAB position data,
# show the raw positions and wait for 6 landmark clicks (onclick), which
# triggers done() to linearize and save this epoch.
for an in animals[0:1]:
    anim1 = an[0]
    anim2 = an[1]
    anim3 = an[2]

    #for day in xrange(0, 12):
    #for day in xrange(10, 11):
    for day in xrange(5, 6):
        sdy = ("0%d" % day) if (day < 10) else "%d" % day
        # Paths to the ripple, linearized-position and raw-position files.
        frip = "%(bd)s/%(s3)s/%(s1)sripplescons%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
        flnp = "%(bd)s/%(s3)s/%(s1)slinpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
        frwp = "%(bd)s/%(s3)s/%(s1)srawpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}

        # NOTE(review): `os` itself is never imported above (only
        # `from os import listdir` / `from os.path import ...`), so
        # os.access/os.F_OK raise NameError as written -- an `import os`
        # seems to be missing, unless exf() injects it.
        if os.access("%s" % frip, os.F_OK):
            rip = _sio.loadmat(frip) # load matlab .mat files
            mLp = _sio.loadmat(flnp)
            mRp = _sio.loadmat(frwp)
            ex = rip["ripplescons"].shape[1] - 1
            _pts = mLp["linpos"][0,ex]

            #for epc in range(0, _pts.shape[1], 2):
            for epc in range(4, 5):
                ep=epc+1;
                # experimental data mark, position container
                # frip = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)sripplescons%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # flnp = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)slinpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # frwp = "%(bd)s/Dropbox (EastWestSideHippos)/BostonData/%(s3)s/%(s1)srawpos%(sdy)s.mat" % {"s1" : anim1, "sdy" : sdy, "s3" : anim3, "bd" : basedir}
                # these are in seconds
                # episodes 2, 4, 6
                # seg 1->2->3
                # seg 1->4->5

                #%%
                #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                #%%%% Linearization %%%%
                #%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
                _pts=mLp["linpos"][0,ex][0,ep]
                if (_pts.shape[1] > 0): # might be empty epoch
                    pts = _pts["statematrix"][0][0]["time"][0,0].T[0]
                    a = mLp["linpos"][0,ex][0,ep]["statematrix"][0,0]["segmentIndex"]
                    r = mRp["rawpos"][0,ex][0,ep]["data"][0,0]
                    # Keep only samples where both trackers report a position.
                    zrrp = _N.where((r[:, 1] > 0) & (r[:, 2] > 0) & (r[:, 3] > 0) & (r[:, 4] > 0))[0]
                    szrrp = zrrp[::4]

                    fig = _plt.figure()
                    _plt.scatter(0.5*(r[szrrp, 1]+r[szrrp, 3]), 0.5*(r[szrrp, 2] + r[szrrp, 4]), s=3, color="grey")
                    # Hook the landmark-picking callback; done() fires
                    # automatically after the 6th click.
                    cid = fig.canvas.mpl_connect('button_press_event', onclick)

                    # for each instant in time
                    # calculate line closest, and the velocity vector
                    # use lindist at point closest.
                    # at each point on line closest, we have a unit vector for inbound, outbound
                    # at each time point, we get a (x_closest, +/- 1)
                    #
                    N = r.shape[0]

                    # landmarks is
                    # segs
                    #segs -
                    #cr[:, 0] = 0.5*(r[zrrp, 1]+r[zrrp, 3])
                    #cr[:, 1] = 0.5*(r[zrrp, 2]+r[zrrp, 4])
                    # 5 segments, 2 points
                    # crds = N x 2
| [
"[email protected]"
] | |
5fb093f8c41cb92f675cd2da841b33b45f64efcf | b50b37c18bd5b34e30766ce2f5813d02a700604b | /page_locators/orderbook_page_locator.py | c5ad42d691b786404a8302b9a1c3454b60d47889 | [] | no_license | Hanlen520/app_automation_sh | d1d7ea6250e7e8b1fdb3a6f01340010e3a3a1d40 | 3fa2f30b644de5ea46cb21e4e6c10e90aab73779 | refs/heads/master | 2023-03-15T18:15:14.279001 | 2020-02-10T02:36:00 | 2020-02-10T02:36:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,786 | py | # -*- coding: utf-8 -*-
"""
=================================
Author: keen
Created on: 2019/11/5
E-mail:[email protected]
=================================
"""
from appium.webdriver.common.mobileby import MobileBy
class OrderBookPageLocator(object):
    """Locators for the order-book ("bill") pages of the cashier app.

    Each attribute is a ``(strategy, value)`` tuple consumable by
    ``driver.find_element(*locator)``: either an Android resource id
    (``MobileBy.ID``) or a UiAutomator selector string
    (``MobileBy.ANDROID_UIAUTOMATOR``). The UiSelector text values are the
    exact Chinese UI strings and must not be translated.
    """

    # "Bill" tab button in the bottom navigation
    order_nav_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("账单")'
    # Order status label in the bill list
    order_list_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_status"
    # Bill-list status: "payment successful"
    pay_success_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("支付成功")'
    # Bill-list status: "full refund"
    refund_all_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("全额退款")'
    # Bill-list status: "partial refund"
    refund_part_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("部分退款")'
    # Payment-method icon in the bill list
    order_list_payment_img_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_show"
    # Date widget in the bill list
    order_list_date_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_date"
    # Amount widget in the bill list
    order_list_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_bill"
    # Receiving-store name widget in the bill list
    order_list_shop_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_remark"
    # Cashier name widget in the bill list
    order_list_merchant_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_shouyinyuan"
    # Transaction-time widget in the bill list
    order_list_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_time"
    # Refund-amount widget in the bill list
    order_list_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_bill"
    # Amount widget on the bill-detail page
    order_detail_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_money"
    # Payment-method widget on the bill-detail page
    order_detail_pyment_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_fanghsi"
    # Order-time widget on the bill-detail page
    order_detail_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_time"
    # Order-number widget on the bill-detail page
    order_detail_id_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_dingdan"
    # Payment status on the bill-detail page
    order_detail_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_type"
    # Cashier name on the bill-detail page
    order_detail_merchant_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_details_name"
    # Order-note text on the bill-detail page
    order_detail_note_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_show_beizhu"
    # Order-note button
    order_note_button_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/rel_order_note"
    # Order-note input field
    order_note_input_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/et_order_note"
    # Save button of the order note
    order_note_save_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btn_save"
    # Back button on the bill list
    order_list_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/back_iv"
    # Refund button on the bill-detail page
    refund_button_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_flow_refund"
    # Refund-amount input field
    edit_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/edit_amount"
    # Payment-password input field (tap first, then type)
    edit_pay_password_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/edit_password"
    # Confirm button (refund dialog)
    refund_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/yes"
    # Cancel button (refund dialog)
    refund_no_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/no"
    # Back button on the order-detail page
    detail_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_flow_back"
    # Filter button on the bill-list page
    screen_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_title_zhangdan"
    # "This month" quick-range button
    this_month_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("本月")'
    # "Last 7 days" quick-range button
    near_seven_days_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("近7天")'
    # "Last 24 hours" quick-range button
    near_twenty_four_hours_button_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("近24小时")'
    # Start-time button
    start_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_start_time"
    # Confirm button (time picker)
    time_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btnSubmit"
    # Cancel button (time picker)
    time_cancel_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/btnCancel"
    # End-time button
    end_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_end_time"
    # Store-selection button
    store_choose_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_store"
    # Store choices:
    # "All stores" checkbox
    all_store_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/cb_all_store"
    # A specific store by name (example):
    # store_name_loc = MobileBy.ANDROID_UIAUTOMATOR, 'new UiSelector().text("吉野家")'
    # Confirm button (store picker)
    store_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_sure"
    # Terminal-selection button
    terminal_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_equipment"
    # A specific terminal — all terminal rows share the same id:
    # terminal_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/rel_check"
    # Reset button (terminal picker)
    terminal_reset_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_terminal_reset"
    # Confirm button (terminal picker)
    terminal_confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_terminal_confirm"
    # Payment-method filters:
    # "All"
    payment_method_all_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("全部")')
    # "WeChat"
    payment_method_wechat_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("微信")')
    # "Alipay"
    payment_method_alipay_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("支付宝")')
    # "Card swipe"
    payment_method_pos_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("刷卡")')
    # "Pre-authorization"
    payment_method_auth_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("预授权")')
    # "Other"
    payment_method_other_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("其他")')
    # Payment-status filters:
    # "All orders"
    status_all_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("全部订单")')
    # "Payment received"
    status_success_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("收款成功")')
    # "Refunded" (UI string is "退款成功", i.e. "refund successful")
    status_refund_loc = MobileBy.ANDROID_UIAUTOMATOR, (
        'new UiSelector().resourceId("com.cashier.jiutongshanghu:id/tv_state").text("退款成功")')
    # Reset button (filter panel)
    reset_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_reset"
    # Confirm button (filter panel)
    confirm_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/nct_confirm"
    # Start/end time shown in the filter-result header
    screen_result_start_end_time = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_filter_time"
    # Store name in the filter result
    # (fix: the original line ended with a stray "\" continuation that
    # swallowed the next comment line into this assignment)
    screen_result_store_name_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_mendian_select"
    # Payment method in the filter result
    screen_result_payment_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_pay_way"
    # Number of successful payments in the filter result
    screen_result_success_num_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_success_count"
    # Number of refunds in the filter result
    screen_result_refund_num_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_count"
    # Cashier terminal in the filter result
    screen_result_terminal_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_receive_people"
    # Payment status in the filter result
    screen_result_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_pay_status"
    # Received amount in the filter result
    screen_result_success_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_success_money"
    # Refunded amount in the filter result
    screen_result_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_money"
    # Per-row payment status in the filter result
    screen_result_list_status_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_status"
    # Per-row received amount in the filter result
    screen_result_list_success_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_bill"
    # Per-row refunded amount in the filter result
    screen_result_list_refund_amount_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_refund_bill"
    # Per-row update time in the filter result
    screen_result_list_time_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/tv_time"
    # Back button on the filter-result page
    # screen_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/iv_back"
    screen_back_loc = MobileBy.ID, "com.cashier.jiutongshanghu:id/back_iv"
| [
"[email protected]"
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.