| Column | Type | Stats / Range |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
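A minimal sketch of how rows with this schema could be loaded and iterated with the Hugging Face `datasets` library; the repository id `user/code-dataset` below is a placeholder, not this dataset's actual name.

```python
from datasets import load_dataset

# "user/code-dataset" is a hypothetical repo id -- substitute the real dataset name.
ds = load_dataset("user/code-dataset", split="train", streaming=True)

for row in ds:
    # Each row pairs repository metadata with the raw file text in `content`.
    print(row["repo_name"], row["path"], row["license_type"], row["length_bytes"])
    print(row["content"][:120])
    break
```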
20891c001bfbe780b1f4865470d6788401eefa16 | ce196aba0adde47ea2767eae1d7983a1ef548bb8 | /求n,m的最小公倍数.py | 8ff00c68cb9f7a31cebbfb654fe4a258fce95226 | [] | no_license | xiang-daode/Python3_codes | 5d2639ffd5d65065b98d029e79b8f3608a37cf0b | 06c64f85ce2c299aef7f9311e9473e0203a05b09 | refs/heads/main | 2023-08-30T14:59:55.123128 | 2021-11-03T05:12:24 | 2021-11-03T05:12:24 | 333,632,892 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 296 | py | # Compute the least common multiple of n and m
def gbs(x,y):
    # Euclid's algorithm gives gcd(x, y); then lcm(x, y) = x*y // gcd(x, y).
    a,b=x,y
    while b:
        a,b=b,a%b
    f=x*y//a
    print(f)
n=int(input())
m=int(input())
gbs(n,m) | [
"[email protected]"
] | |
3d6ade2c6dbc770b827d650cd0b41c4c9c3b901e | d9ccb2b8e549a594bf06868391481ea8669786ea | /migrations/versions/9320d2f7765b_add_source_file.py | c10af5965a24672f6ca7c873edbb88c525eefb10 | [
"Apache-2.0"
] | permissive | clld/dogonlanguages | 00dd3895dffbb99c048f0d0a8970d6cd4199ff5c | 2b0b510e853b77c9e356a9c73142401afc93b04a | refs/heads/master | 2022-12-13T02:33:10.590590 | 2022-12-02T08:34:24 | 2022-12-02T08:34:24 | 25,243,999 | 1 | 2 | Apache-2.0 | 2021-12-07T13:25:46 | 2014-10-15T07:39:00 | Python | UTF-8 | Python | false | false | 858 | py | # coding=utf-8
"""add source file
Revision ID: 9320d2f7765b
Revises: 1770d17056aa
Create Date: 2017-05-05 09:58:20.128175
"""
from alembic import op
from clld.db.migration import Connection
from clld.db.models.common import Source, Source_files
# revision identifiers, used by Alembic.
revision = '9320d2f7765b'
down_revision = '1770d17056aa'
def upgrade():
conn = Connection(op.get_bind())
spk = conn.pk(Source, 'heathetal2015')
conn.insert(
Source_files,
jsondata={
"thumbnail": None,
"web": None,
"size": 7531008,
"objid": "EAEA0-C97A-A1D2-2E76-0",
"original": "a.xls"},
id='heathetal2015-1',
name='Dogon.comp.vocab.UNICODE.xls',
ord=1,
mime_type='application/vnd.ms-excel',
object_pk=spk)
def downgrade():
pass
| [
"[email protected]"
] | |
e5a27eaa219e0fde7041e68c4eb80d954a19f87a | 66c6f9a24c9a1f912e93f96b439b81a10cffac77 | /test/vanilla/Expected/AcceptanceTests/BodyBoolean/bodyboolean/__init__.py | 9582ec858066aa9339d3ef1d5f01d189ad7214ef | [
"MIT",
"LicenseRef-scancode-generic-cla"
] | permissive | kairu-ms/autorest.python | 5dd0e8bf2ebf0c0dc148342003899fabd269f946 | 20870e3870fcfeae9567b63343d2320bf388f3c6 | refs/heads/master | 2023-04-29T23:00:50.568945 | 2020-01-17T18:03:00 | 2020-01-17T18:03:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 737 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from ._configuration import AutoRestBoolTestServiceConfiguration
from ._auto_rest_bool_test_service import AutoRestBoolTestService
__all__ = ['AutoRestBoolTestService', 'AutoRestBoolTestServiceConfiguration']
from .version import VERSION
__version__ = VERSION
| [
"[email protected]"
] | |
2435d771630538e9959dd54e81aaf11fc02774d0 | 87b7ec1af5bde5aa46f1982008aecec00ca00c1d | /conf.py | 027ac88796a93502408f1ac60968deead521e278 | [
"MIT"
] | permissive | kattni/Adafruit_CircuitPython_VS1053 | ca656e64a83f74e398bbc0ad21d8c9fd27614270 | 20d5ac7f71117b8bdd4db75678ce98e3a6b19e49 | refs/heads/master | 2020-03-31T09:50:02.930951 | 2018-10-09T01:39:32 | 2018-10-09T01:39:32 | 152,112,523 | 0 | 0 | null | 2018-10-08T16:34:31 | 2018-10-08T16:34:31 | null | UTF-8 | Python | false | false | 4,600 | py | # -*- coding: utf-8 -*-
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
]
intersphinx_mapping = {'python': ('https://docs.python.org/3.4', None),'BusDevice': ('https://circuitpython.readthedocs.io/projects/bus_device/en/latest/', None),'CircuitPython': ('https://circuitpython.readthedocs.io/en/latest/', None)}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'README'
# General information about the project.
project = u'Adafruit VS1053 Library'
copyright = u'2017 Tony DiCola'
author = u'Tony DiCola'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.0'
# The full version, including alpha/beta/rc tags.
release = u'1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
default_role = "any"
# If true, '()' will be appended to :func: etc. cross-reference text.
#
add_function_parentheses = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Output file base name for HTML help builder.
htmlhelp_basename = 'AdafruitVS1053Librarydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AdafruitVS1053Library.tex', u'Adafruit VS1053 Library Documentation',
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'adafruitVS1053library', u'Adafruit VS1053 Library Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AdafruitVS1053Library', u'Adafruit VS1053 Library Documentation',
author, 'AdafruitVS1053Library', 'One line description of project.',
'Miscellaneous'),
]
| [
"[email protected]"
] | |
7606b5f0032344a49b091eeaf649d06bc041c27a | 3dc3bbe607ab7b583eb52dbaae86636eb642960a | /mmaction/models/backbones/resnet_audio.py | fd3a520e927f7efcdddbf8c512ea00f787ad6fbd | [
"Apache-2.0"
] | permissive | open-mmlab/mmaction2 | 659c36c6083fd3d9d072e074a8d4b3a50342b9bd | 582b78fd6c3240500d5cacd292339d7d1ddbb056 | refs/heads/main | 2023-08-28T18:14:50.423980 | 2023-08-10T09:20:06 | 2023-08-10T09:20:06 | 278,810,244 | 3,498 | 1,028 | Apache-2.0 | 2023-09-07T06:50:44 | 2020-07-11T07:19:10 | Python | UTF-8 | Python | false | false | 14,308 | py | # Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence
import torch
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmengine.logging import MMLogger
from mmengine.model.weight_init import constant_init, kaiming_init
from mmengine.runner import load_checkpoint
from mmengine.utils.dl_utils.parrots_wrapper import _BatchNorm
from torch.nn.modules.utils import _ntuple
from mmaction.registry import MODELS
from mmaction.utils import ConfigType
class Bottleneck2dAudio(nn.Module):
"""Bottleneck2D block for ResNet2D.
Args:
inplanes (int): Number of channels for the input in first conv3d layer.
planes (int): Number of channels produced by some norm/conv3d layers.
stride (int): Stride in the conv layer. Defaults to 2.
dilation (int): Spacing between kernel elements. Defaults to 1.
downsample (nn.Module, optional): Downsample layer. Defaults to None.
factorize (bool): Whether to factorize kernel. Defaults to True.
        norm_cfg (dict): Config for norm layers. Required keys are ``type`` and
            ``requires_grad``. Defaults to None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
            memory while slowing down the training speed. Defaults to False.
"""
expansion = 4
def __init__(self,
inplanes: int,
planes: int,
stride: int = 2,
dilation: int = 1,
downsample: Optional[nn.Module] = None,
factorize: bool = True,
norm_cfg: ConfigType = None,
with_cp: bool = False) -> None:
super().__init__()
self.inplanes = inplanes
self.planes = planes
self.stride = stride
self.dilation = dilation
self.factorize = factorize
self.norm_cfg = norm_cfg
self.with_cp = with_cp
self.conv1_stride = 1
self.conv2_stride = stride
conv1_kernel_size = (1, 1)
conv1_padding = 0
conv2_kernel_size = (3, 3)
conv2_padding = (dilation, dilation)
self.conv1 = ConvModule(
inplanes,
planes,
kernel_size=conv1_kernel_size,
padding=conv1_padding,
dilation=dilation,
norm_cfg=self.norm_cfg,
bias=False)
self.conv2 = ConvModule(
planes,
planes,
kernel_size=conv2_kernel_size,
stride=stride,
padding=conv2_padding,
dilation=dilation,
bias=False,
conv_cfg=dict(type='ConvAudio') if factorize else dict(
type='Conv'),
norm_cfg=None,
act_cfg=None)
self.conv3 = ConvModule(
2 * planes if factorize else planes,
planes * self.expansion,
kernel_size=1,
bias=False,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The output of the module.
"""
def _inner_forward(x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
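        # Gradient checkpointing: recompute the block in the backward pass
        # to trade extra compute for lower activation memory.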
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
out = self.relu(out)
return out
@MODELS.register_module()
class ResNetAudio(nn.Module):
"""ResNet 2d audio backbone. Reference:
<https://arxiv.org/abs/2001.08740>`_.
Args:
depth (int): Depth of resnet, from ``{50, 101, 152}``.
pretrained (str, optional): Name of pretrained model. Defaults to None.
in_channels (int): Channel num of input features. Defaults to 1.
base_channels (int): Channel num of stem output features.
Defaults to 32.
num_stages (int): Resnet stages. Defaults to 4.
strides (Sequence[int]): Strides of residual blocks of each stage.
Defaults to ``(1, 2, 2, 2)``.
dilations (Sequence[int]): Dilation of each stage.
Defaults to ``(1, 1, 1, 1)``.
conv1_kernel (int): Kernel size of the first conv layer. Defaults to 9.
conv1_stride (Union[int, Tuple[int]]): Stride of the first conv layer.
Defaults to 1.
frozen_stages (int): Stages to be frozen (all param fixed). -1 means
not freezing any parameters. Defaults to -1.
factorize (Sequence[int]): factorize Dims of each block for audio.
Defaults to ``(1, 1, 0, 0)``.
norm_eval (bool): Whether to set BN layers to eval mode, namely, freeze
running stats (mean and var). Defaults to False.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Defaults to False.
conv_cfg (Union[dict, ConfigDict]): Config for norm layers.
Defaults to ``dict(type='Conv')``.
        norm_cfg (Union[dict, ConfigDict]): Config for norm layers. Required
keys are ``type`` and ``requires_grad``.
Defaults to ``dict(type='BN2d', requires_grad=True)``.
act_cfg (Union[dict, ConfigDict]): Config for activate layers.
Defaults to ``dict(type='ReLU', inplace=True)``.
zero_init_residual (bool): Whether to use zero initialization
for residual block. Defaults to True.
"""
arch_settings = {
# 18: (BasicBlock2dAudio, (2, 2, 2, 2)),
# 34: (BasicBlock2dAudio, (3, 4, 6, 3)),
50: (Bottleneck2dAudio, (3, 4, 6, 3)),
101: (Bottleneck2dAudio, (3, 4, 23, 3)),
152: (Bottleneck2dAudio, (3, 8, 36, 3))
}
def __init__(self,
depth: int,
pretrained: str = None,
in_channels: int = 1,
num_stages: int = 4,
base_channels: int = 32,
strides: Sequence[int] = (1, 2, 2, 2),
dilations: Sequence[int] = (1, 1, 1, 1),
conv1_kernel: int = 9,
conv1_stride: int = 1,
frozen_stages: int = -1,
factorize: Sequence[int] = (1, 1, 0, 0),
norm_eval: bool = False,
with_cp: bool = False,
conv_cfg: ConfigType = dict(type='Conv'),
norm_cfg: ConfigType = dict(type='BN2d', requires_grad=True),
act_cfg: ConfigType = dict(type='ReLU', inplace=True),
zero_init_residual: bool = True) -> None:
super().__init__()
if depth not in self.arch_settings:
raise KeyError(f'invalid depth {depth} for resnet')
self.depth = depth
self.pretrained = pretrained
self.in_channels = in_channels
self.base_channels = base_channels
self.num_stages = num_stages
assert 1 <= num_stages <= 4
self.dilations = dilations
self.conv1_kernel = conv1_kernel
self.conv1_stride = conv1_stride
self.frozen_stages = frozen_stages
self.stage_factorization = _ntuple(num_stages)(factorize)
self.norm_eval = norm_eval
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.zero_init_residual = zero_init_residual
self.block, stage_blocks = self.arch_settings[depth]
self.stage_blocks = stage_blocks[:num_stages]
self.inplanes = self.base_channels
self._make_stem_layer()
self.res_layers = []
for i, num_blocks in enumerate(self.stage_blocks):
stride = strides[i]
dilation = dilations[i]
planes = self.base_channels * 2**i
res_layer = self.make_res_layer(
self.block,
self.inplanes,
planes,
num_blocks,
stride=stride,
dilation=dilation,
factorize=self.stage_factorization[i],
norm_cfg=self.norm_cfg,
with_cp=with_cp)
self.inplanes = planes * self.block.expansion
layer_name = f'layer{i + 1}'
self.add_module(layer_name, res_layer)
self.res_layers.append(layer_name)
self.feat_dim = self.block.expansion * self.base_channels * 2**(
len(self.stage_blocks) - 1)
@staticmethod
def make_res_layer(block: nn.Module,
inplanes: int,
planes: int,
blocks: int,
stride: int = 1,
dilation: int = 1,
factorize: int = 1,
norm_cfg: Optional[ConfigType] = None,
with_cp: bool = False) -> nn.Module:
"""Build residual layer for ResNetAudio.
Args:
block (nn.Module): Residual module to be built.
inplanes (int): Number of channels for the input feature
in each block.
planes (int): Number of channels for the output feature
in each block.
blocks (int): Number of residual blocks.
stride (int): Strides of residual blocks of each stage.
Defaults to 1.
dilation (int): Spacing between kernel elements. Defaults to 1.
            factorize (Union[int, Sequence[int]]): Determine whether to
factorize for each block. Defaults to 1.
norm_cfg (Union[dict, ConfigDict], optional): Config for norm
layers. Defaults to None.
with_cp (bool): Use checkpoint or not. Using checkpoint will save
some memory while slowing down the training speed.
Defaults to False.
Returns:
nn.Module: A residual layer for the given config.
"""
factorize = factorize if not isinstance(
factorize, int) else (factorize, ) * blocks
assert len(factorize) == blocks
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = ConvModule(
inplanes,
planes * block.expansion,
kernel_size=1,
stride=stride,
bias=False,
norm_cfg=norm_cfg,
act_cfg=None)
layers = []
layers.append(
block(
inplanes,
planes,
stride,
dilation,
downsample,
factorize=(factorize[0] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(
block(
inplanes,
planes,
1,
dilation,
factorize=(factorize[i] == 1),
norm_cfg=norm_cfg,
with_cp=with_cp))
return nn.Sequential(*layers)
def _make_stem_layer(self) -> None:
"""Construct the stem layers consists of a ``conv+norm+act`` module and
a pooling layer."""
self.conv1 = ConvModule(
self.in_channels,
self.base_channels,
kernel_size=self.conv1_kernel,
stride=self.conv1_stride,
bias=False,
conv_cfg=dict(type='ConvAudio', op='sum'),
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def _freeze_stages(self) -> None:
"""Prevent all the parameters from being optimized before
``self.frozen_stages``."""
if self.frozen_stages >= 0:
self.conv1.bn.eval()
for m in [self.conv1.conv, self.conv1.bn]:
for param in m.parameters():
param.requires_grad = False
for i in range(1, self.frozen_stages + 1):
m = getattr(self, f'layer{i}')
m.eval()
for param in m.parameters():
param.requires_grad = False
def init_weights(self) -> None:
"""Initiate the parameters either from existing checkpoint or from
scratch."""
if isinstance(self.pretrained, str):
logger = MMLogger.get_current_instance()
logger.info(f'load model from: {self.pretrained}')
load_checkpoint(self, self.pretrained, strict=False, logger=logger)
elif self.pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, _BatchNorm):
constant_init(m, 1)
if self.zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck2dAudio):
constant_init(m.conv3.bn, 0)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x: torch.Tensor) -> torch.Tensor:
"""Defines the computation performed at every call.
Args:
x (torch.Tensor): The input data.
Returns:
torch.Tensor: The feature of the input samples extracted
by the backbone.
"""
x = self.conv1(x)
for layer_name in self.res_layers:
res_layer = getattr(self, layer_name)
x = res_layer(x)
return x
def train(self, mode: bool = True) -> None:
"""Set the optimization status when training."""
super().train(mode)
self._freeze_stages()
if mode and self.norm_eval:
for m in self.modules():
if isinstance(m, _BatchNorm):
m.eval()
| [
"[email protected]"
] | |
529a5ab95bf0f23e3253b65c5eec0ee18fc952aa | 0455b5da2b6bc9fad7b92f6b99005a8c81cdda97 | /emiratesnbd/items.py | 9d273557032c6fd1404c5a4aab8e83a35ed96782 | [] | no_license | hristo-grudev/emiratesnbd | da22bf3f3023f5bd6b9e230a4df3da5ab61d509c | 48494986f0ed44b15cc1b7b1a425a4d65582cf1f | refs/heads/main | 2023-04-15T00:59:31.067111 | 2021-04-22T06:28:13 | 2021-04-22T06:28:13 | 360,414,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 138 | py | import scrapy
class EmiratesnbdItem(scrapy.Item):
title = scrapy.Field()
description = scrapy.Field()
date = scrapy.Field()
| [
"[email protected]"
] | |
4be32626ca67e776aca8f11478838d70d8e803bb | 5afd733a5c1f753601c69b8b4eae1b49edfbae7c | /201-300/282.py | f1575add29bfe60cd0d5c53e27f8449440e6ebb0 | [] | no_license | yanbinbi/leetcode | 9dcd4a0160be915006455b83d6b7cd39e9819811 | 616a868bfa7bdd00195067b0477b0236a72d23e0 | refs/heads/master | 2021-05-13T19:34:17.222576 | 2017-11-12T02:04:31 | 2017-11-12T02:04:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,279 | py | class Solution(object):
def addOperators(self, num, target):
"""
:type num: str
:type target: int
:rtype: List[str]
"""
self.ret =set()
self.dfs(num, [], target)
return list(self.ret)
def dfs(self, num, arr, target):
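        # Split the digit string into every possible operand sequence (a
        # multi-digit operand may not start with '0'), then let `cal` try
        # the operator placements between those operands.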
size = len(num)
if size == 0:
if len(arr) > 0:
self.cal(arr, target, '', False)
else:
for i in range(1, size+1):
if num[0] != '0' or i == 1:
arr.append(int(num[:i]))
self.dfs(num[i:], arr, target)
arr.pop()
def cal(self, arr, target, exp, reverse):
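        # Try '+', '-' and '*' between operands. After a '-', the remaining
        # suffix must evaluate to val - target with '+'/'-' swapped, which the
        # `reverse` flag tracks; '*' folds the next operand into `val` directly.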
size = len(arr)
if size == 0 and target == 0:
self.ret.add(exp)
else:
val = arr[0]
exp += str(val)
plus, minus = ('+','-') if not reverse else ('-','+')
for i in range(1,size):
a, b = arr[i], arr[i:]
self.cal(b, target-val, exp+plus, reverse)
self.cal(b, -target+val, exp+minus, not reverse)
val *= a
exp += ('*'+str(a))
if val == target:
self.ret.add(exp)
| [
"[email protected]"
] | |
9d13794ae4997422a14e76c2f9e828ff273b8e4e | 5537eec7f43098d216d2b550678c8d10b2a26f09 | /venv/ansible/lib/python2.7/site-packages/azure/batch/models/task_scheduling_policy.py | 220faadb2936c714ded099065c8ed5afe1731434 | [] | no_license | wipro-sdx/Automation | f0ae1512b8d9d491d7bacec94c8906d06d696407 | a8c46217d0fbe51a71597b5db87cbe98ed19297a | refs/heads/master | 2021-07-08T11:09:05.314435 | 2018-05-02T07:18:54 | 2018-05-02T07:18:54 | 131,812,982 | 0 | 1 | null | 2020-07-23T23:22:33 | 2018-05-02T07:15:28 | Python | UTF-8 | Python | false | false | 1,183 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class TaskSchedulingPolicy(Model):
"""Specifies how tasks should be distributed across compute nodes.
:param node_fill_type: How tasks should be distributed across compute
nodes. Possible values include: 'spread', 'pack', 'unmapped'
:type node_fill_type: str or :class:`ComputeNodeFillType
<azure.batch.models.ComputeNodeFillType>`
"""
_validation = {
'node_fill_type': {'required': True},
}
_attribute_map = {
'node_fill_type': {'key': 'nodeFillType', 'type': 'ComputeNodeFillType'},
}
def __init__(self, node_fill_type):
self.node_fill_type = node_fill_type
| [
"[email protected]"
] | |
e00c4b648f1a238bca577a041f426ddc93bae731 | b01429f27f8d7f4db7e3eba0abbb6be1ea67e2fa | /imageimage1.2/langage.py | dc64ee7d7051ab678f89b67c5e801ac667c1855a | [] | no_license | pastrouveedespeudo/ste-fois-c-la-bonne | 3dce8cdfc6b5523d9651e8ec9a143b7ab7789d21 | 9872c35423870c9854ee0bda120cca0c832c1fc9 | refs/heads/master | 2020-04-20T22:08:34.295196 | 2019-02-17T17:18:36 | 2019-02-17T17:18:36 | 169,129,734 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,165 | py | from mémoire import *
from outils_fichier import *
from politesse import *
from outils_internet import *
# Rules
# only the important things get marked
# in mémoire, str.find the list; if it's not there, add it
# that's what makes it an AI
class langage:
def début1(self):
#début1(self, oInput)
#self.oInput = oInput
politesse.politesse(self)
self.oInput = input("salut")
self.oInput = self.oInput.lower()
        self.exception()
print(self.oInput)
c = 0
for i in self.politesse:
if self.oInput != i:
pass
elif self.oInput == i:
c+=1
if c <= 0:
outils_fichier.ecris_propri(self, self.oInput, "politesse.py")
def exception(self):
exception1 = "ça va"
if self.oInput == "ca va" or self.oInput == "ca va ?" :
self.oInput = self.oInput.replace("c","ç")
elif self.oInput == "hey" or self.oInput == "Hey":
pass
def début1_1(self):
mémoire.liste(self)
liste = []
liste1 = [[],[],[],[],[],[],[],[],[],[],[],[],[]]
        self.liste_langage = [] # important list
        self.ponctu = [] # 2nd important list
liste.append(self.oInput)
c = 0
for i in liste:
for j in i:
if j == " ":
c+=1
else:
liste1[c].append(j)
for i in self.liste_ponctuation:
for j in liste1:
if j == [] or j == [""] or j == [" "] or j == [ ]:
pass
else:
j = "".join(j)
if i == j :
langage.début1_1_outils( "?", self.liste_langage, self.intero, i)
langage.début1_1_outils( "!", self.liste_langage, self.excla, i)
langage.début1_1_outils( ".", self.liste_langage, self.point, i)
langage.début1_1_outils( ",", self.liste_langage, self.virgule, i)
langage.début1_1_outils( "'", self.liste_langage, self.guillemet, i)
langage.début1_1_outils( ":", self.liste_langage, self.deuxpoints, i)
langage.début1_1_outils( "?!", self.liste_langage, self.interoexcla, i)
self.ponctu = self.oInput[-1]
self.oInput = self.oInput[:-1]
if self.liste_langage == []:
self.liste_langage.append("aucune ponctuation")
print(self.liste_langage)
def début1_1_outils(self, ponctuation, liste, késako, i):
mémoire.liste(self)
self.i = i
self.liste = liste
self.késako = késako
self.ponctuation = ponctuation
if self.i == self.ponctuation:
self.liste.extend(self.késako)
def début1_reponse(self):
pass
def définition_context_phrase(self):
liste = []
outils_internet.recherche_langage(self, self.oInput, liste)
self.politesse = ""
a = ["forme de politesse", "Forme de politesse", "Marque de politesse",
"Formule de salutation", "salutation", "Salutation", "Formule de politesse"]
for i in a:
formule_politesse = str(liste).find(str(i))
if formule_politesse > 0:
print(self.oInput + " beauté, c deja un bon debut !")
                # politeness formula
self.politesse = True
break
if self.politesse == True:
pass
            # otherwise move on to the next step
            # need to work out what the person is saying
langage = langage()
langage.début1()
langage.début1_1()
langage.définition_context_phrase()
| [
"[email protected]"
] | |
b04ed6d55babef029018dcc05dcb6afed7e6ad71 | 0dc27aeb342b9b898256adf256c5b77a666e68fb | /lm386/codes/coeffs.py | f505630bc52bdbb325464e11d00650e33c721eb4 | [] | no_license | gadepall/EE2230 | 7861b898d90abf04b935cb77b31f2c70e290b096 | 739b01d8f5da93cc5c38121e62ea6d9e87851146 | refs/heads/master | 2020-03-25T20:25:57.885565 | 2019-10-06T01:57:13 | 2019-10-06T01:57:13 | 144,130,660 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,010 | py | import numpy as np
def dir_vec(A,B):
return B-A
def norm_vec(A,B):
return omat@dir_vec(A,B)
#Generate line points
def line_gen(A,B):
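    # Sample evenly spaced points A + t*(B - A) for t in [0, 1].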
len =10
x_AB = np.zeros((2,len))
lam_1 = np.linspace(0,1,len)
for i in range(len):
temp1 = A + lam_1[i]*(B-A)
x_AB[:,i]= temp1.T
return x_AB
#Centre and Radius of the circumcircle
def ccircle(A,B,C):
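    # The circumcentre O solves the perpendicular-bisector equations
    # (A - B) . O = (|A|^2 - |B|^2) / 2 and (B - C) . O = (|B|^2 - |C|^2) / 2,
    # assembled below as the linear system N @ O = p.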
p = np.zeros(2)
n1 = dir_vec(B,A)
p[0] = 0.5*(np.linalg.norm(A)**2-np.linalg.norm(B)**2)
n2 = dir_vec(C,B)
p[1] = 0.5*(np.linalg.norm(B)**2-np.linalg.norm(C)**2)
#Intersection
N=np.vstack((n1,n2))
O=np.linalg.inv(N)@p
r = np.linalg.norm(A -O)
return O,r
def line_intersect(n1,c1,n2,c2):
N=np.vstack((n1,n2))
p = np.zeros(2)
p[0] = c1
p[1] = c2
P=np.linalg.inv(N)@p
return P
#Intersection
A = np.array([-2,-2])
B = np.array([1,3])
dvec = np.array([-1,1])
omat = np.array([[0,1],[-1,0]])
#AB =np.vstack((A,B)).T
#print (dir_vec(A,B))
#print (norm_vec(A,B))
| [
"[email protected]"
] | |
6f9f7154c397d558289889cac3d1ff8b7cb8991b | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_upstarts.py | a99715c49d6b20f79507a7ced2a14b39179cb8bc | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py |
#calss header
class _UPSTARTS():
def __init__(self,):
self.name = "UPSTARTS"
self.definitions = upstart
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['upstart']
| [
"[email protected]"
] | |
0cb85670c7ca2f434db6351d257e24ca060fa7f1 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/380/usersdata/341/93264/submittedfiles/principal.py | 6cda24a7d78aabf878f6abdc614407b80d6ddb97 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 276 | py | # -*- coding: utf-8 -*-
# START HERE BELOW
n=int(input('Enter the number of grades: '))
notas = []
for i in range (0,n,1):
    notas.append(float(input('Enter grade %d: ' % (i+1))))
media=0
for i in range (0,n,1):
media += notas[i]/n
print(notas[i])
print(media)
| [
"[email protected]"
] | |
279efe43b1dbd7cc075bd5b7c93df9bcdff1d52d | b5e93a09ee136b2b035c9958557e3e4091d8d9fd | /horch/models/attention.py | d13ba6fb39ab10e20773465ea90474516517c923 | [
"MIT"
] | permissive | ccglyyn/pytorch-hrvvi-ext | 2ee0cd27461c344783150535fbadea5fbe29f25b | a020da3543982464ff3888ff84b311e98a130d6d | refs/heads/master | 2022-04-20T13:35:07.561985 | 2020-04-21T10:36:36 | 2020-04-21T10:36:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,883 | py | import torch
from horch.models.modules import Conv2d, HardSigmoid, Identity
from torch import nn as nn
from torch.nn import functional as F
class SEModule(nn.Module):
def __init__(self, in_channels, reduction=8):
super().__init__()
channels = in_channels // reduction
self.pool = nn.AdaptiveAvgPool2d(1)
self.layers = nn.Sequential(
nn.Linear(in_channels, channels),
nn.ReLU(True),
nn.Linear(channels, in_channels),
nn.Sigmoid(),
)
def forward(self, x):
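        # Squeeze: global average pool each channel map to a scalar;
        # excite: a bottleneck MLP with a sigmoid gate yields per-channel
        # weights that rescale the input feature map.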
b, c = x.size()[:2]
s = self.pool(x).view(b, c)
s = self.layers(s).view(b, c, 1, 1)
return x * s
class CBAMChannelAttention(nn.Module):
def __init__(self, in_channels, reduction=8):
super().__init__()
channels = in_channels // reduction
self.mlp = nn.Sequential(
nn.Linear(in_channels, channels),
nn.ReLU(True),
nn.Linear(channels, in_channels),
)
def forward(self, x):
b, c = x.size()[:2]
aa = F.adaptive_avg_pool2d(x, 1).view(b, c)
aa = self.mlp(aa)
am = F.adaptive_max_pool2d(x, 1).view(b, c)
am = self.mlp(am)
a = torch.sigmoid(aa + am).view(b, c, 1, 1)
return x * a
class CBAMSpatialAttention(nn.Module):
def __init__(self):
super().__init__()
self.conv = Conv2d(2, 1, kernel_size=7, norm_layer='bn')
def forward(self, x):
aa = x.mean(dim=1, keepdim=True)
am = x.max(dim=1, keepdim=True)[0]
a = torch.cat([aa, am], dim=1)
a = torch.sigmoid(self.conv(a))
return x * a
class CBAM(nn.Module):
def __init__(self, in_channels, reduction=4):
super().__init__()
self.channel = CBAMChannelAttention(in_channels, reduction)
self.spatial = CBAMSpatialAttention()
def forward(self, x):
x = self.channel(x)
x = self.spatial(x)
return x
class SELayerM(nn.Module):
def __init__(self, in_channels, reduction=4):
super().__init__()
channels = in_channels // reduction
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.layers = nn.Sequential(
nn.Linear(in_channels, channels),
nn.ReLU6(True),
nn.Linear(channels, in_channels),
HardSigmoid(True),
)
def forward(self, x):
b, c = x.size()[:2]
s = self.avgpool(x).view(b, c)
s = self.layers(s).view(b, c, 1, 1)
return x * s
def get_attention(name, **kwargs):
if not name:
return Identity()
name = name.lower()
if name == 'se':
return SEModule(**kwargs)
elif name == 'sem':
return SELayerM(**kwargs)
elif name == 'cbam':
return CBAM(**kwargs)
else:
raise NotImplementedError("No attention module named %s" % name) | [
"[email protected]"
] | |
f9939dba69a49fcceaa92a28b7e0708a772e5a5d | 51891febfc6247af3fe5c39b3063d1f1995a0173 | /src/scatter3d_demo.py | 50e8fbaac07658778552197aa7e1ff424f28fe21 | [] | no_license | jim1949/car_controller | 4ab391eef29e46563853bc3d54a06a6c4a0714c4 | f2053ddde429dbdef39261d24197f3bc7936166f | refs/heads/master | 2020-12-25T14:23:37.339889 | 2016-09-13T09:55:07 | 2016-09-13T09:55:07 | 67,448,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,068 | py | # import numpy as np
# from mpl_toolkits.mplot3d import Axes3D
# import matplotlib.pyplot as plt
# # def randrange(n, vmin, vmax):
# # return (vmax - vmin)*np.random.rand(n) + vmin
# # fig = plt.figure()
# # ax = fig.add_subplot(111, projection='3d')
# # n = 100
# # for c, m, zl, zh in [('r', 'o', -50, -25), ('b', '^', -30, -5)]:
# # xs = randrange(n, 23, 32)
# # ys = randrange(n, 0, 100)
# # zs = randrange(n, zl, zh)
# # ax.scatter(xs, ys, zs, c=c, marker=m)
# xs=np.array((range(1,100)))
# ys=np.array((range(1,100)))
# zs=np.array((range(1,100)))
# Axes3D.scatter(xs, ys, zs, zdir='z', c='b')
# ax.set_xlabel('X Label')
# ax.set_ylabel('Y Label')
# ax.set_zlabel('Z Label')
# plt.show()
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x =[1,2,3,4,5,6,7,8,9,10]
y =[5,6,2,3,13,4,1,2,4,8]
z =[2,3,3,3,5,7,9,11,9,10]
ax.scatter(x, y, z, c='r', marker='o')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
plt.show()
| [
"[email protected]"
] | |
1891a0d48660d0b40e0a7143031cddabaaaca6d6 | c1267fbec95318184e7388cddf9b7085f797d514 | /2023/03 March/db03242023.py | c6859abc43ce48d34df3d2d5d2e620c5251009bd | [
"MIT"
] | permissive | vishrutkmr7/DailyPracticeProblemsDIP | 1aedfd2e173847bf22989a6b0ec550acebb2bd86 | 2c365f633a1e1bee281fbdc314969f03b17ac9ec | refs/heads/master | 2023-05-31T23:49:52.135349 | 2023-05-28T09:32:12 | 2023-05-28T09:32:12 | 199,596,248 | 10 | 4 | MIT | 2022-11-02T21:31:59 | 2019-07-30T07:12:46 | Python | UTF-8 | Python | false | false | 563 | py | """
Given positive an integer num, return whether or not it is a perfect square.
Ex: Given the following num...
num = 9, return true.
Ex: Given the following num...
num = 18, return false.
"""
class Solution:
def isPerfectSquare(self, num: int) -> bool:
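        # Newton's method for sqrt: start at num and repeatedly average r with
        # num / r; once r**2 <= num, num is a perfect square iff r**2 == num.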
r = num
while r**2 > num:
r = (r + num / r) // 2
return r**2 == num
# Test Cases
if __name__ == "__main__":
solution = Solution()
assert solution.isPerfectSquare(9) is True
assert solution.isPerfectSquare(18) is False
print("All tests passed.")
| [
"[email protected]"
] | |
8295157610a4f2105ed98d1ae6239095adf384e2 | a2d36e471988e0fae32e9a9d559204ebb065ab7f | /huaweicloud-sdk-vpcep/huaweicloudsdkvpcep/v1/model/update_endpoint_service_request_body.py | 3bc8d522210d8b68191c685c6dd1771204d35e22 | [
"Apache-2.0"
] | permissive | zhouxy666/huaweicloud-sdk-python-v3 | 4d878a90b8e003875fc803a61414788e5e4c2c34 | cc6f10a53205be4cb111d3ecfef8135ea804fa15 | refs/heads/master | 2023-09-02T07:41:12.605394 | 2021-11-12T03:20:11 | 2021-11-12T03:20:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,291 | py | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class UpdateEndpointServiceRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'approval_enabled': 'bool',
'service_name': 'str',
'ports': 'list[PortList]',
'port_id': 'str',
'vip_port_id': 'str'
}
attribute_map = {
'approval_enabled': 'approval_enabled',
'service_name': 'service_name',
'ports': 'ports',
'port_id': 'port_id',
'vip_port_id': 'vip_port_id'
}
def __init__(self, approval_enabled=None, service_name=None, ports=None, port_id=None, vip_port_id=None):
"""UpdateEndpointServiceRequestBody - a model defined in huaweicloud sdk"""
self._approval_enabled = None
self._service_name = None
self._ports = None
self._port_id = None
self._vip_port_id = None
self.discriminator = None
if approval_enabled is not None:
self.approval_enabled = approval_enabled
if service_name is not None:
self.service_name = service_name
if ports is not None:
self.ports = ports
if port_id is not None:
self.port_id = port_id
if vip_port_id is not None:
self.vip_port_id = vip_port_id
@property
def approval_enabled(self):
"""Gets the approval_enabled of this UpdateEndpointServiceRequestBody.
        Whether approval is required. ● false: no approval is needed; endpoint connections are created directly in the accepted state. ● true: approval is needed; endpoint connections must be approved by the owner of the endpoint service before use. Defaults to true (approval required).
:return: The approval_enabled of this UpdateEndpointServiceRequestBody.
:rtype: bool
"""
return self._approval_enabled
@approval_enabled.setter
def approval_enabled(self, approval_enabled):
"""Sets the approval_enabled of this UpdateEndpointServiceRequestBody.
        Whether approval is required. ● false: no approval is needed; endpoint connections are created directly in the accepted state. ● true: approval is needed; endpoint connections must be approved by the owner of the endpoint service before use. Defaults to true (approval required).
:param approval_enabled: The approval_enabled of this UpdateEndpointServiceRequestBody.
:type: bool
"""
self._approval_enabled = approval_enabled
@property
def service_name(self):
"""Gets the service_name of this UpdateEndpointServiceRequestBody.
        Name of the endpoint service, no more than 16 characters; uppercase and lowercase letters, digits, underscores and hyphens are allowed.
:return: The service_name of this UpdateEndpointServiceRequestBody.
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this UpdateEndpointServiceRequestBody.
        Name of the endpoint service, no more than 16 characters; uppercase and lowercase letters, digits, underscores and hyphens are allowed.
:param service_name: The service_name of this UpdateEndpointServiceRequestBody.
:type: str
"""
self._service_name = service_name
@property
def ports(self):
"""Gets the ports of this UpdateEndpointServiceRequestBody.
        List of port mappings opened by the service; see Table 4-22 for details. Duplicate port mappings are not allowed under one endpoint service. If multiple endpoint services share one port_id, the combination of server_port and protocol must be unique across all port mappings of those services. At most 200 mappings can be added in a single call.
:return: The ports of this UpdateEndpointServiceRequestBody.
:rtype: list[PortList]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""Sets the ports of this UpdateEndpointServiceRequestBody.
        List of port mappings opened by the service; see Table 4-22 for details. Duplicate port mappings are not allowed under one endpoint service. If multiple endpoint services share one port_id, the combination of server_port and protocol must be unique across all port mappings of those services. At most 200 mappings can be added in a single call.
:param ports: The ports of this UpdateEndpointServiceRequestBody.
:type: list[PortList]
"""
self._ports = ports
@property
def port_id(self):
"""Gets the port_id of this UpdateEndpointServiceRequestBody.
        ID identifying the backend resource of the endpoint service, in Universally Unique Identifier (UUID) format. Values: ● LB type: port ID of the private IP address of an enhanced load balancer; see the vip_port_id field in the response of "Querying Details of a Load Balancer" in the Elastic Load Balance API Reference. ● VM type: NIC ID of the IP address of an Elastic Cloud Server; see the port_id field in the response of "Querying NICs of an ECS" in the Elastic Cloud Server API Reference. ● VIP type: NIC ID of the physical server hosting the virtual resource. Note: when the backend resource is of the LB type, it can only be changed to the vip_port_id of a backend resource of the same type; for example, a shared load balancer can only be replaced with another shared load balancer, not with a dedicated one.
:return: The port_id of this UpdateEndpointServiceRequestBody.
:rtype: str
"""
return self._port_id
@port_id.setter
def port_id(self, port_id):
"""Sets the port_id of this UpdateEndpointServiceRequestBody.
        ID identifying the backend resource of the endpoint service, in Universally Unique Identifier (UUID) format. Values: ● LB type: port ID of the private IP address of an enhanced load balancer; see the vip_port_id field in the response of "Querying Details of a Load Balancer" in the Elastic Load Balance API Reference. ● VM type: NIC ID of the IP address of an Elastic Cloud Server; see the port_id field in the response of "Querying NICs of an ECS" in the Elastic Cloud Server API Reference. ● VIP type: NIC ID of the physical server hosting the virtual resource. Note: when the backend resource is of the LB type, it can only be changed to the vip_port_id of a backend resource of the same type; for example, a shared load balancer can only be replaced with another shared load balancer, not with a dedicated one.
:param port_id: The port_id of this UpdateEndpointServiceRequestBody.
:type: str
"""
self._port_id = port_id
@property
def vip_port_id(self):
"""Gets the vip_port_id of this UpdateEndpointServiceRequestBody.
        NIC ID of the virtual IP address.
:return: The vip_port_id of this UpdateEndpointServiceRequestBody.
:rtype: str
"""
return self._vip_port_id
@vip_port_id.setter
def vip_port_id(self, vip_port_id):
"""Sets the vip_port_id of this UpdateEndpointServiceRequestBody.
        NIC ID of the virtual IP address.
:param vip_port_id: The vip_port_id of this UpdateEndpointServiceRequestBody.
:type: str
"""
self._vip_port_id = vip_port_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, UpdateEndpointServiceRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
681d225433e47211d53e066d86b564a3b40b86f6 | c46754b9600a12df4f9d7a6320dfc19aa96b1e1d | /examples/flax/image-captioning/run_image_captioning_flax.py | bbc79977a467931ba4630dda11dc76e9380ae962 | [
"Apache-2.0"
] | permissive | huggingface/transformers | ccd52a0d7c59e5f13205f32fd96f55743ebc8814 | 4fa0aff21ee083d0197a898cdf17ff476fae2ac3 | refs/heads/main | 2023-09-05T19:47:38.981127 | 2023-09-05T19:21:33 | 2023-09-05T19:21:33 | 155,220,641 | 102,193 | 22,284 | Apache-2.0 | 2023-09-14T20:44:49 | 2018-10-29T13:56:00 | Python | UTF-8 | Python | false | false | 56,513 | py | #!/usr/bin/env python
# coding=utf-8
# Copyright 2022 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library vision-encoder-decoder models for image captioning.
"""
import json
import logging
import os
import sys
import time
import warnings
from dataclasses import asdict, dataclass, field
from enum import Enum
from functools import partial
from pathlib import Path
from typing import Callable, Optional
import datasets
import evaluate
import jax
import jax.numpy as jnp
import nltk # Here to have a nice missing dependency error message early on
import numpy as np
import optax
from datasets import Dataset, load_dataset
from filelock import FileLock
from flax import jax_utils, traverse_util
from flax.jax_utils import unreplicate
from flax.training import train_state
from flax.training.common_utils import get_metrics, onehot, shard, shard_prng_key
from huggingface_hub import Repository, create_repo
from PIL import Image
from tqdm import tqdm
import transformers
from transformers import (
AutoImageProcessor,
AutoTokenizer,
FlaxVisionEncoderDecoderModel,
HfArgumentParser,
is_tensorboard_available,
)
from transformers.utils import is_offline_mode, send_example_telemetry
logger = logging.getLogger(__name__)
try:
nltk.data.find("tokenizers/punkt")
except (LookupError, OSError):
if is_offline_mode():
raise LookupError(
"Offline mode: run this script without TRANSFORMERS_OFFLINE first to download nltk data files"
)
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
# Copied from transformers.models.bart.modeling_flax_bart.shift_tokens_right
def shift_tokens_right(input_ids: np.ndarray, pad_token_id: int, decoder_start_token_id: int) -> np.ndarray:
"""
Shift input ids one token to the right.
"""
shifted_input_ids = np.zeros_like(input_ids)
shifted_input_ids[:, 1:] = input_ids[:, :-1]
shifted_input_ids[:, 0] = decoder_start_token_id
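    # -100 is the label-ignore index; map those positions back to the real pad token id.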
shifted_input_ids = np.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
return shifted_input_ids
@dataclass
class TrainingArguments:
output_dir: str = field(
metadata={"help": "The output directory where the model predictions and checkpoints will be written."},
)
overwrite_output_dir: bool = field(
default=False,
metadata={
"help": (
"Overwrite the content of the output directory. "
"Use this to continue training if output_dir points to a checkpoint directory."
)
},
)
do_train: bool = field(default=False, metadata={"help": "Whether to run training."})
do_eval: bool = field(default=False, metadata={"help": "Whether to run eval on the dev set."})
do_predict: bool = field(default=False, metadata={"help": "Whether to run predictions on the test set."})
per_device_train_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for training."}
)
per_device_eval_batch_size: int = field(
default=8, metadata={"help": "Batch size per GPU/TPU core/CPU for evaluation."}
)
_block_size_doc = """
The default value `0` will preprocess (tokenization + image processing) the whole dataset before training and
cache the results. This uses more disk space, but avoids (repeated) processing time during training. This is a
good option if your disk space is large enough to store the whole processed dataset.
If a positive value is given, the captions in the dataset will be tokenized before training and the results are
cached. During training, it iterates the dataset in chunks of size `block_size`. On each block, images are
transformed by the image processor with the results being kept in memory (no cache), and batches of size
`batch_size` are yielded before processing the next block. This could avoid the heavy disk usage when the
dataset is large.
"""
block_size: int = field(default=0, metadata={"help": _block_size_doc})
learning_rate: float = field(default=5e-5, metadata={"help": "The initial learning rate for AdamW."})
weight_decay: float = field(default=0.0, metadata={"help": "Weight decay for AdamW if we apply some."})
adam_beta1: float = field(default=0.9, metadata={"help": "Beta1 for AdamW optimizer"})
adam_beta2: float = field(default=0.999, metadata={"help": "Beta2 for AdamW optimizer"})
adam_epsilon: float = field(default=1e-8, metadata={"help": "Epsilon for AdamW optimizer."})
label_smoothing_factor: float = field(
default=0.0, metadata={"help": "The label smoothing epsilon to apply (zero means no label smoothing)."}
)
num_train_epochs: float = field(default=3.0, metadata={"help": "Total number of training epochs to perform."})
warmup_steps: int = field(default=0, metadata={"help": "Linear warmup over warmup_steps."})
logging_steps: int = field(default=500, metadata={"help": "Log every X updates steps."})
eval_steps: int = field(default=None, metadata={"help": "Run an evaluation every X steps."})
seed: int = field(default=42, metadata={"help": "Random seed that will be set at the beginning of training."})
push_to_hub: bool = field(
default=False, metadata={"help": "Whether or not to upload the trained model to the model hub after training."}
)
hub_model_id: str = field(
default=None, metadata={"help": "The name of the repository to keep in sync with the local `output_dir`."}
)
hub_token: str = field(default=None, metadata={"help": "The token to use to push to the Model Hub."})
def __post_init__(self):
if self.output_dir is not None:
self.output_dir = os.path.expanduser(self.output_dir)
def to_dict(self):
"""
Serializes this instance while replace `Enum` by their values (for JSON serialization support). It obfuscates
the token values by removing their value.
"""
d = asdict(self)
for k, v in d.items():
if isinstance(v, Enum):
d[k] = v.value
if isinstance(v, list) and len(v) > 0 and isinstance(v[0], Enum):
d[k] = [x.value for x in v]
if k.endswith("_token"):
d[k] = f"<{k.upper()}>"
return d
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune, or train from scratch.
"""
model_name_or_path: str = field(
metadata={"help": "The model checkpoint for weights initialization."},
)
cache_dir: Optional[str] = field(
default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"}
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
dtype: Optional[str] = field(
default="float32",
metadata={
"help": (
"Floating-point format in which the model weights should be initialized and trained. Choose one of"
" `[float32, float16, bfloat16]`."
)
},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `huggingface-cli login` (stored in `~/.huggingface`)."
)
},
)
use_auth_token: bool = field(
default=None,
metadata={
"help": "The `use_auth_token` argument is deprecated and will be removed in v4.34. Please use `token`."
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
"should only be set to `True` for repositories you trust and in which you have read the code, as it will"
"execute code present on the Hub on your local machine."
)
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
data_dir: Optional[str] = field(
default=None, metadata={"help": "The data directory of the dataset to use (via the datasets library)."}
)
image_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the full image file paths."},
)
caption_column: Optional[str] = field(
default=None,
metadata={"help": "The name of the column in the datasets containing the image captions."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input predict data file to do prediction on (a text file)."},
)
max_target_length: Optional[int] = field(
default=128,
metadata={
"help": (
"The maximum total sequence length for target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
val_max_target_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
"This argument is also used to override the `max_length` param of `model.generate`, which is used "
"during evaluation."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
predict_with_generate: bool = field(
default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."}
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": (
"Number of beams to use for evaluation. This argument will be passed to `model.generate`, "
"which is used during evaluation."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
def __post_init__(self):
if self.dataset_name is None and self.train_file is None and self.validation_file is None:
raise ValueError("Need either a dataset name or a training/validation file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
if extension not in ["csv", "json"]:
raise ValueError(f"`train_file` should be a csv or a json file, got {extension}.")
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
if extension not in ["csv", "json"]:
raise ValueError(f"`validation_file` should be a csv or a json file, got {extension}.")
if self.val_max_target_length is None:
self.val_max_target_length = self.max_target_length
image_captioning_name_mapping = {
"image_caption_dataset.py": ("image_path", "caption"),
}
class TrainState(train_state.TrainState):
dropout_rng: jnp.ndarray
def replicate(self):
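        # Copy the train state onto every local device and give each device
        # its own slice of the dropout PRNG key.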
return jax_utils.replicate(self).replace(dropout_rng=shard_prng_key(self.dropout_rng))
def data_loader(rng: jax.random.PRNGKey, dataset: Dataset, batch_size: int, shuffle: bool = False):
"""
Returns batches of size `batch_size` from truncated `dataset`, sharded over all local devices.
Shuffle batches if `shuffle` is `True`.
"""
steps = len(dataset) // batch_size # Skip incomplete batch.
# We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
# dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation
# mechanism, which works differently from NumPy/SciPy.
if shuffle:
batch_idx = jax.random.permutation(rng, len(dataset))
batch_idx = np.asarray(batch_idx)
else:
batch_idx = np.arange(len(dataset))
for idx in range(steps):
start_idx = batch_size * idx
end_idx = batch_size * (idx + 1)
selected_indices = batch_idx[start_idx:end_idx]
batch = dataset[selected_indices]
batch = shard(batch)
yield batch
def write_metric(summary_writer, metrics, train_time, step, metric_key_prefix="train"):
if train_time:
summary_writer.scalar("train_time", train_time, step)
metrics = get_metrics(metrics)
for key, vals in metrics.items():
tag = f"{metric_key_prefix}_{key}"
for i, val in enumerate(vals):
summary_writer.scalar(tag, val, step - len(vals) + i + 1)
else:
for metric_name, value in metrics.items():
summary_writer.scalar(f"{metric_key_prefix}_{metric_name}", value, step)
def create_learning_rate_fn(
train_ds_size: int, train_batch_size: int, num_train_epochs: int, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.array]:
"""Returns a linear warmup, linear_decay learning rate function."""
steps_per_epoch = train_ds_size // train_batch_size
num_train_steps = steps_per_epoch * num_train_epochs
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
init_value=learning_rate, end_value=0, transition_steps=num_train_steps - num_warmup_steps
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
return schedule_fn
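# Illustrative: with 1000 total steps and 100 warmup steps, the schedule rises linearly
# 0 -> learning_rate over steps [0, 100), then decays linearly back to 0 by step 1000.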
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if model_args.use_auth_token is not None:
warnings.warn("The `use_auth_token` argument is deprecated and will be removed in v4.34.", FutureWarning)
if model_args.token is not None:
raise ValueError("`token` and `use_auth_token` are both specified. Please set only the argument `token`.")
model_args.token = model_args.use_auth_token
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_image_captioning", model_args, data_args, framework="flax")
if (
os.path.exists(training_args.output_dir)
and os.listdir(training_args.output_dir)
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty."
"Use --overwrite_output_dir to overcome."
)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# Set the verbosity to info of the Transformers logger (on main process only):
logger.info(f"Training/evaluation parameters {training_args}")
# Handle the repository creation
if training_args.push_to_hub:
        # Retrieve or infer repo_name
repo_name = training_args.hub_model_id
if repo_name is None:
repo_name = Path(training_args.output_dir).absolute().name
# Create repo and retrieve repo_id
repo_id = create_repo(repo_name, exist_ok=True, token=training_args.hub_token).repo_id
# Clone repo locally
repo = Repository(training_args.output_dir, clone_from=repo_id, token=training_args.hub_token)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files this script will use the first column for the full image path and the second column for the
# captions (unless you specify column names for this with the `image_column` and `caption_column` arguments).
#
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
dataset = load_dataset(
data_args.dataset_name,
data_args.dataset_config_name,
cache_dir=model_args.cache_dir,
keep_in_memory=False,
data_dir=data_args.data_dir,
token=model_args.token,
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
dataset = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
model = FlaxVisionEncoderDecoderModel.from_pretrained(
model_args.model_name_or_path,
seed=training_args.seed,
dtype=getattr(jnp, model_args.dtype),
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
image_processor = AutoImageProcessor.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer.pad_token = tokenizer.convert_ids_to_tokens(model.config.pad_token_id)
# Preprocessing the datasets.
# We need to tokenize inputs and targets.
if training_args.do_train:
column_names = dataset["train"].column_names
elif training_args.do_eval:
column_names = dataset["validation"].column_names
elif training_args.do_predict:
column_names = dataset["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
dataset_columns = image_captioning_name_mapping.get(data_args.dataset_name, None)
if data_args.image_column is None:
if dataset_columns is None:
raise ValueError(
f"`--dataset_name` {data_args.dataset_name} not found in dataset '{data_args.dataset_name}'. Make sure"
" to set `--dataset_name` to the correct dataset name, one of"
f" {', '.join(image_captioning_name_mapping.keys())}."
)
image_column = dataset_columns[0]
else:
image_column = data_args.image_column
if image_column not in column_names:
raise ValueError(
f"--image_column' value '{data_args.image_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.caption_column is None:
if dataset_columns is None:
raise ValueError(
f"`--dataset_name` {data_args.dataset_name} not found in dataset '{data_args.dataset_name}'. Make sure"
" to set `--dataset_name` to the correct dataset name, one of"
f" {', '.join(image_captioning_name_mapping.keys())}."
)
caption_column = dataset_columns[1]
else:
caption_column = data_args.caption_column
if caption_column not in column_names:
raise ValueError(
f"--caption_column' value '{data_args.caption_column}' needs to be one of: {', '.join(column_names)}"
)
# In Flax, for seq2seq models we need to pass `decoder_input_ids`
# as the Flax models don't accept `labels`, we need to prepare the decoder_input_ids here
# for that dynamically import the `shift_tokens_right` function from the model file
model_module = __import__(model.__module__, fromlist=["shift_tokens_right"])
shift_tokens_right_fn = getattr(model_module, "shift_tokens_right", shift_tokens_right)
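    # Illustrative: for labels [[a, b, c]] the shift yields decoder_input_ids
    # [[decoder_start, a, b]], i.e. the decoder predicts each token from its left context.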
def filter_fn(examples):
"""remove problematic images"""
bools = []
for image_file in examples[image_column]:
try:
image = Image.open(image_file)
image_processor(images=image, return_tensors="np")
bools.append(True)
except Exception:
bools.append(False)
return bools
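    # Filtering out unreadable/corrupt image files up-front keeps the jitted training and
    # evaluation loops from crashing mid-epoch on a bad example.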
# Setting padding="max_length" as we need fixed length inputs for jitted functions
def tokenization_fn(examples, max_target_length):
"""Run tokenization on captions."""
captions = []
for caption in examples[caption_column]:
captions.append(caption.lower() + " " + tokenizer.eos_token)
targets = captions
model_inputs = {}
labels = tokenizer(
text_target=targets,
max_length=max_target_length,
padding="max_length",
truncation=True,
return_tensors="np",
)
model_inputs["labels"] = labels["input_ids"]
decoder_input_ids = shift_tokens_right_fn(
labels["input_ids"], model.config.pad_token_id, model.config.decoder_start_token_id
)
model_inputs["decoder_input_ids"] = np.asarray(decoder_input_ids)
# We need decoder_attention_mask so we can ignore pad tokens from loss
model_inputs["decoder_attention_mask"] = labels["attention_mask"]
model_inputs[image_column] = examples[image_column]
return model_inputs
def image_processing_fn(examples, check_image=True):
"""
Run preprocessing on images
If `check_image` is `True`, the examples that fails during `Image.open()` will be caught and discarded.
Otherwise, an exception will be thrown.
"""
model_inputs = {}
if check_image:
images = []
to_keep = []
for image_file in examples[image_column]:
try:
img = Image.open(image_file)
images.append(img)
to_keep.append(True)
except Exception:
to_keep.append(False)
for k, v in examples.items():
if k != image_column:
model_inputs[k] = v[to_keep]
else:
images = [Image.open(image_file) for image_file in examples[image_column]]
encoder_inputs = image_processor(images=images, return_tensors="np")
model_inputs["pixel_values"] = encoder_inputs.pixel_values
return model_inputs
def preprocess_fn(examples, max_target_length, check_image=True):
"""Run tokenization + image processing"""
model_inputs = {}
# This contains image path column
model_inputs.update(tokenization_fn(examples, max_target_length))
model_inputs.update(image_processing_fn(model_inputs, check_image=check_image))
# Remove image path column
model_inputs.pop(image_column)
return model_inputs
features = datasets.Features(
{
"pixel_values": datasets.Array3D(
shape=(
getattr(model.config.encoder, "num_channels", 3),
model.config.encoder.image_size,
model.config.encoder.image_size,
),
dtype="float32",
),
"labels": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
"decoder_input_ids": datasets.Sequence(feature=datasets.Value(dtype="int32", id=None), length=-1, id=None),
"decoder_attention_mask": datasets.Sequence(
feature=datasets.Value(dtype="int32", id=None), length=-1, id=None
),
}
)
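    # Declaring `features` explicitly pins the Arrow schema (fixed-shape float32 pixel
    # arrays), sparing `.map()` the type inference pass when it materializes the
    # fully-processed dataset.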
# If `block_size` is `0`, tokenization & image processing is done at the beginning
run_img_proc_at_beginning = training_args.block_size == 0
# Used in .map() below
function_kwarg = preprocess_fn if run_img_proc_at_beginning else tokenization_fn
# `features` is used only for the final preprocessed dataset (for the performance purpose).
features_kwarg = features if run_img_proc_at_beginning else None
# Keep `image_column` if the image processing is done during training
remove_columns_kwarg = [x for x in column_names if x != image_column or run_img_proc_at_beginning]
processor_names = "tokenizer and image processor" if run_img_proc_at_beginning else "tokenizer"
# Store some constant
train_batch_size = int(training_args.per_device_train_batch_size) * jax.device_count()
eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
if training_args.block_size % train_batch_size > 0 or training_args.block_size % eval_batch_size > 0:
raise ValueError(
"`training_args.block_size` needs to be a multiple of the global train/eval batch size."
f"Got {training_args.block_size}, {train_batch_size} and {eval_batch_size} respectively instead."
)
if training_args.do_train:
if "train" not in dataset:
raise ValueError("--do_train requires a train dataset")
train_dataset = dataset["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
# remove problematic examples
# (if image processing is performed at the beginning, the filtering is done during preprocessing below
# instead here.)
if not run_img_proc_at_beginning:
train_dataset = train_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
train_dataset = train_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# kept image paths
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on train dataset",
fn_kwargs={"max_target_length": data_args.max_target_length},
features=features_kwarg,
)
if run_img_proc_at_beginning:
# set format (for performance) since the dataset is ready to be used
train_dataset = train_dataset.with_format("numpy")
steps_per_epoch = len(train_dataset) // train_batch_size
num_train_examples_per_epoch = steps_per_epoch * train_batch_size
num_epochs = int(training_args.num_train_epochs)
total_train_steps = steps_per_epoch * num_epochs
else:
num_train_examples_per_epoch = 0
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = dataset["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
# remove problematic examples
# (if image processing is performed at the beginning, the filtering is done during preprocessing below
# instead here.)
if not run_img_proc_at_beginning:
eval_dataset = eval_dataset.filter(filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers)
eval_dataset = eval_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# kept image paths
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on validation dataset",
fn_kwargs={"max_target_length": data_args.val_max_target_length},
features=features_kwarg,
)
if run_img_proc_at_beginning:
# set format (for performance) since the dataset is ready to be used
eval_dataset = eval_dataset.with_format("numpy")
num_eval_examples = len(eval_dataset)
eval_steps = num_eval_examples // eval_batch_size
if training_args.do_predict:
if "test" not in dataset:
raise ValueError("--do_predict requires a test dataset")
predict_dataset = dataset["test"]
if data_args.max_predict_samples is not None:
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
# remove problematic examples
# (if image processing is performed at the beginning, the filtering is done during preprocessing below
# instead here.)
if not run_img_proc_at_beginning:
predict_dataset = predict_dataset.filter(
filter_fn, batched=True, num_proc=data_args.preprocessing_num_workers
)
predict_dataset = predict_dataset.map(
function=function_kwarg,
batched=True,
num_proc=data_args.preprocessing_num_workers,
# kept image paths
remove_columns=remove_columns_kwarg,
load_from_cache_file=not data_args.overwrite_cache,
desc=f"Running {processor_names} on prediction dataset",
fn_kwargs={"max_target_length": data_args.val_max_target_length},
features=features_kwarg,
)
if run_img_proc_at_beginning:
# set format (for performance) since the dataset is ready to be used
predict_dataset = predict_dataset.with_format("numpy")
num_test_examples = len(predict_dataset)
test_steps = num_test_examples // eval_batch_size
def blockwise_data_loader(
rng: jax.random.PRNGKey,
ds: Dataset,
block_size: int,
batch_size: int,
shuffle: bool = False,
keep_in_memory: bool = False,
split: str = "",
):
"""
Wrap the simple `data_loader` in a block-wise way if `block_size` > 0, else it's the same as `data_loader`.
If `block_size` > 0, it requires `ds` to have a column that gives image paths in order to perform image
processing (with the column name being specified by `image_column`). The tokenization should be done before
training in this case.
"""
# We use `numpy.ndarray` to interact with `datasets.Dataset`, since using `jax.numpy.array` to index into a
# dataset is significantly slow. Using JAX array at the 1st place is only to keep JAX's PRNGs generation
# mechanism, which works differently from NumPy/SciPy.
if shuffle:
indices = jax.random.permutation(rng, len(ds))
indices = np.asarray(indices)
else:
indices = np.arange(len(ds))
_block_size = len(ds) if not block_size else block_size
steps_per_block = _block_size // batch_size
num_examples = len(ds)
steps = num_examples // batch_size
num_splits = steps // steps_per_block + int(steps % steps_per_block > 0)
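        # Illustrative: block_size=1024 and batch_size=128 give steps_per_block=8;
        # num_splits is ceil(steps / steps_per_block) via the `+ int(... > 0)` remainder term.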
for idx in range(num_splits):
if not block_size:
_ds = ds
else:
start_idx = block_size * idx
end_idx = block_size * (idx + 1)
selected_indices = indices[start_idx:end_idx]
_ds = ds.select(selected_indices)
_ds = _ds.map(
image_processing_fn,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=[image_column],
load_from_cache_file=not data_args.overwrite_cache,
features=features,
keep_in_memory=keep_in_memory,
# The images are already checked either in `.filter()` or in `preprocess_fn()`
fn_kwargs={"check_image": False},
desc=f"Running image processing on {split} dataset".replace(" ", " "),
)
_ds = _ds.with_format("numpy")
# No need to shuffle here
loader = data_loader(rng, _ds, batch_size=batch_size, shuffle=False)
for batch in loader:
yield batch
# Metric
metric = evaluate.load("rouge")
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [label.strip() for label in labels]
# rougeLSum expects newline after each sentence
preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in preds]
labels = ["\n".join(nltk.sent_tokenize(label)) for label in labels]
return preds, labels
def compute_metrics(preds, labels):
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
# Some simple post-processing
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels, use_stemmer=True)
# Extract a few results from ROUGE
result = {key: value.mid.fmeasure * 100 for key, value in result.items()}
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 6) for k, v in result.items()}
return result, decoded_preds, decoded_labels
# Enable tensorboard only on the master node
has_tensorboard = is_tensorboard_available()
if has_tensorboard and jax.process_index() == 0:
try:
from flax.metrics.tensorboard import SummaryWriter
summary_writer = SummaryWriter(log_dir=Path(training_args.output_dir))
except ImportError as ie:
has_tensorboard = False
logger.warning(
f"Unable to display metrics through TensorBoard because some package are not installed: {ie}"
)
else:
logger.warning(
"Unable to display metrics through TensorBoard because the package is not installed: "
"Please run pip install tensorboard to enable."
)
# Initialize our training
rng = jax.random.PRNGKey(training_args.seed)
rng, dropout_rng = jax.random.split(rng)
# Create learning rate schedule
linear_decay_lr_schedule_fn = create_learning_rate_fn(
num_train_examples_per_epoch,
train_batch_size,
training_args.num_train_epochs,
training_args.warmup_steps,
training_args.learning_rate,
)
# We use Optax's "masking" functionality to not apply weight decay
# to bias and LayerNorm scale parameters. decay_mask_fn returns a
# mask boolean with the same structure as the parameters.
# The mask is True for parameters that should be decayed.
def decay_mask_fn(params):
flat_params = traverse_util.flatten_dict(params)
# find out all LayerNorm parameters
layer_norm_candidates = ["layernorm", "layer_norm", "ln"]
layer_norm_named_params = {
layer[-2:]
for layer_norm_name in layer_norm_candidates
for layer in flat_params.keys()
if layer_norm_name in "".join(layer).lower()
}
flat_mask = {path: (path[-1] != "bias" and path[-2:] not in layer_norm_named_params) for path in flat_params}
return traverse_util.unflatten_dict(flat_mask)
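    # Illustrative: a path ("encoder", "layer_norm", "scale") maps to False (no decay), as
    # does any path ending in "bias"; kernel/embedding paths map to True and get decayed.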
# create adam optimizer
adamw = optax.adamw(
learning_rate=linear_decay_lr_schedule_fn,
b1=training_args.adam_beta1,
b2=training_args.adam_beta2,
eps=training_args.adam_epsilon,
weight_decay=training_args.weight_decay,
mask=decay_mask_fn,
)
# Setup train state
state = TrainState.create(apply_fn=model.__call__, params=model.params, tx=adamw, dropout_rng=dropout_rng)
# label smoothed cross entropy
def loss_fn(logits, labels, padding_mask, label_smoothing_factor=0.0):
"""
The label smoothing implementation is adapted from Flax's official example:
https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
"""
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing_factor
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
)
soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
loss = optax.softmax_cross_entropy(logits, soft_labels)
loss = loss - normalizing_constant
# ignore padded tokens from loss
loss = loss * padding_mask
loss = loss.sum()
num_labels = padding_mask.sum()
return loss, num_labels
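    # Subtracting `normalizing_constant` (the entropy of the smoothed target distribution)
    # shifts the minimum achievable loss to ~0, keeping logged losses comparable across
    # label_smoothing_factor settings.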
# Define gradient update step fn
def train_step(state, batch, label_smoothing_factor=0.0):
dropout_rng, new_dropout_rng = jax.random.split(state.dropout_rng)
def compute_loss(params):
labels = batch.pop("labels")
logits = state.apply_fn(**batch, params=params, dropout_rng=dropout_rng, train=True)[0]
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
return loss, num_labels
grad_fn = jax.value_and_grad(compute_loss, has_aux=True)
(loss, num_labels), grad = grad_fn(state.params)
num_labels = jax.lax.psum(num_labels, "batch")
# true loss = total loss / total samples
loss = jax.lax.psum(loss, "batch")
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
# true grad = total grad / total samples
grad = jax.lax.psum(grad, "batch")
grad = jax.tree_util.tree_map(lambda x: x / num_labels, grad)
new_state = state.apply_gradients(grads=grad, dropout_rng=new_dropout_rng)
metrics = {"loss": loss, "learning_rate": linear_decay_lr_schedule_fn(state.step)}
return new_state, metrics
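    # Summing loss and label counts across devices (`psum`) before dividing yields a true
    # per-token mean; averaging per-device means would be biased when pad counts differ
    # between device shards.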
# Define eval fn
def eval_step(params, batch, label_smoothing_factor=0.0):
labels = batch.pop("labels")
logits = model(**batch, params=params, train=False)[0]
loss, num_labels = loss_fn(logits, labels, batch["decoder_attention_mask"], label_smoothing_factor)
num_labels = jax.lax.psum(num_labels, "batch")
# true loss = total loss / total samples
loss = jax.lax.psum(loss, "batch")
loss = jax.tree_util.tree_map(lambda x: x / num_labels, loss)
metrics = {"loss": loss}
return metrics
# Define generation function
max_length = (
data_args.val_max_target_length if data_args.val_max_target_length is not None else model.config.max_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else model.config.num_beams
gen_kwargs = {"max_length": max_length, "num_beams": num_beams}
def generate_step(params, batch):
model.params = params
output_ids = model.generate(batch["pixel_values"], **gen_kwargs)
return output_ids.sequences
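    # `generate_step` runs under pmap, so `params` arrives already device-local; assigning
    # to `model.params` lets `model.generate` (which reads params off the module) use them.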
# Create parallel version of the train and eval step
p_train_step = jax.pmap(
partial(train_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch", donate_argnums=(0,)
)
p_eval_step = jax.pmap(partial(eval_step, label_smoothing_factor=training_args.label_smoothing_factor), "batch")
p_generate_step = jax.pmap(generate_step, "batch")
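    # `donate_argnums=(0,)` lets XLA reuse the previous train state's device buffers for
    # the updated state, roughly halving peak device memory for params + optimizer state.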
# Replicate the train state on each device
state = state.replicate()
if training_args.do_train:
logger.info("***** Running training *****")
logger.info(f" Num train examples = {num_train_examples_per_epoch}")
logger.info(f" Num Epochs = {num_epochs}")
logger.info(f" Instantaneous train batch size per device = {training_args.per_device_train_batch_size}")
logger.info(f" Total train batch size (w. parallel & distributed) = {train_batch_size}")
logger.info(f" Optimization steps per epoch = {steps_per_epoch}")
logger.info(f" Total optimization steps = {total_train_steps}")
if training_args.do_eval:
logger.info(f" Num evaluation examples = {num_eval_examples}")
logger.info(f" Instantaneous evaluation batch size per device = {training_args.per_device_eval_batch_size}")
logger.info(f" Total evaluation batch size (w. parallel & distributed) = {eval_batch_size}")
logger.info(f" Evaluation steps = {eval_steps}")
if training_args.do_predict:
logger.info(f" Num test examples = {num_test_examples}")
logger.info(f" Instantaneous test batch size per device = {training_args.per_device_eval_batch_size}")
logger.info(f" Total test batch size (w. parallel & distributed) = {eval_batch_size}")
logger.info(f" Test steps = {test_steps}")
# create output directory
if not os.path.isdir(os.path.join(training_args.output_dir)):
os.makedirs(os.path.join(training_args.output_dir), exist_ok=True)
def save_ckpt(ckpt_dir: str, commit_msg: str = ""):
"""save checkpoints and push to Hugging Face Hub if specified"""
# save checkpoint after each epoch and push checkpoint to the hub
if jax.process_index() == 0:
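            # `x[0]` takes device 0's copy of each replicated array, un-replicating the
            # params before writing them to disk.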
params = jax.device_get(jax.tree_util.tree_map(lambda x: x[0], state.params))
model.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir), params=params)
tokenizer.save_pretrained(os.path.join(training_args.output_dir, ckpt_dir))
if training_args.push_to_hub:
repo.push_to_hub(commit_message=commit_msg, blocking=False)
def evaluation_loop(
rng: jax.random.PRNGKey,
dataset: Dataset,
metric_key_prefix: str = "eval",
ckpt_dir: str = "",
is_prediction=False,
):
logger.info(f"*** {'Predict' if is_prediction else 'Evaluate'} ***")
metrics = []
preds = []
labels = []
batches = blockwise_data_loader(
rng,
dataset,
block_size=training_args.block_size,
batch_size=eval_batch_size,
keep_in_memory=False,
shuffle=False,
split="prediction" if is_prediction else "validation",
)
steps = len(dataset) // eval_batch_size
for _ in tqdm(
range(steps), desc=f"{'Predicting' if is_prediction else 'Evaluating'}...", position=2, leave=False
):
# Model forward
batch = next(batches)
_labels = batch.get("labels", None)
if not is_prediction and _labels is None:
raise ValueError("Evaluation requires the validation dataset to have `labels`")
if _labels is not None:
_metrics = p_eval_step(state.params, batch)
metrics.append(_metrics)
# generation
if data_args.predict_with_generate:
generated_ids = p_generate_step(state.params, batch)
preds.extend(jax.device_get(generated_ids.reshape(-1, gen_kwargs["max_length"])))
if _labels is not None:
labels.extend(jax.device_get(_labels.reshape(-1, _labels.shape[-1])))
if metrics:
# normalize metrics
metrics = get_metrics(metrics)
metrics = jax.tree_util.tree_map(jnp.mean, metrics)
# compute ROUGE metrics
generations = []
rouge_desc = ""
if data_args.predict_with_generate:
if labels:
rouge_metrics, decoded_preds, decoded_labels = compute_metrics(preds, labels)
metrics.update(rouge_metrics)
rouge_desc = " ".join(
[
f"{'Predict' if is_prediction else 'Eval'} {key}: {value} |"
for key, value in rouge_metrics.items()
]
)
for pred, label in zip(decoded_preds, decoded_labels):
pred = pred.replace("\n", " ")
label = label.replace("\n", " ")
generations.append({"label": label, "pred": pred})
else:
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# Some simple post-processing
decoded_preds = [pred.strip() for pred in decoded_preds]
# rougeLSum expects newline after each sentence
decoded_preds = ["\n".join(nltk.sent_tokenize(pred)) for pred in decoded_preds]
for pred in decoded_preds:
pred = pred.replace("\n", " ")
generations.append({"pred": pred})
if metrics:
# Print metrics and update progress bar
desc = f"{'Predict' if is_prediction else 'Eval'} Loss: {metrics['loss']} | {rouge_desc})"
if training_args.do_train and not is_prediction:
desc = f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | " + desc
epochs.write(desc)
epochs.desc = desc
logger.info(desc)
if jax.process_index() == 0:
if not os.path.isdir(os.path.join(training_args.output_dir, ckpt_dir)):
os.makedirs(os.path.join(training_args.output_dir, ckpt_dir), exist_ok=True)
if metrics:
# Save metrics (only for the evaluation/prediction being done along with training)
if has_tensorboard and training_args.do_train:
write_metric(
summary_writer, metrics, train_time=None, step=cur_step, metric_key_prefix=metric_key_prefix
)
# save final metrics in json
metrics = {
f"{metric_key_prefix}_{metric_name}": round(value.item(), 6)
for metric_name, value in metrics.items()
}
_path = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_results.json")
with open(_path, "w") as f:
json.dump(metrics, f, indent=4, sort_keys=True)
# Update report
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
fp.write(desc + "\n")
# Save generations
if generations:
output_file = os.path.join(training_args.output_dir, ckpt_dir, f"{metric_key_prefix}_generation.json")
with open(output_file, "w", encoding="UTF-8") as fp:
json.dump(generations, fp, ensure_ascii=False, indent=4)
def evaluate(rng: jax.random.PRNGKey, dataset: Dataset, ckpt_dir: str = ""):
evaluation_loop(rng, dataset, metric_key_prefix="eval", ckpt_dir=ckpt_dir)
def predict(rng: jax.random.PRNGKey, dataset: Dataset):
evaluation_loop(rng, dataset, metric_key_prefix="test", is_prediction=True)
input_rng = None
if training_args.do_train:
cur_step = 0
train_time = 0
epochs = tqdm(range(num_epochs), desc=f"Epoch ... (1/{num_epochs})", position=0)
for epoch in epochs:
# ======================== Training ================================
# Create sampling rng
rng, input_rng = jax.random.split(rng)
train_metrics = []
train_batches = blockwise_data_loader(
input_rng,
train_dataset,
block_size=training_args.block_size,
batch_size=train_batch_size,
keep_in_memory=True,
shuffle=True,
split="train",
)
# train
for batch_idx, _ in enumerate(tqdm(range(steps_per_epoch), desc="Training...", position=1, leave=False)):
cur_step += 1
batch = next(train_batches)
batch_start = time.time()
state, train_metric = p_train_step(state, batch)
train_metrics.append(train_metric)
train_time += time.time() - batch_start
time_per_step = train_time / cur_step
# log and save info
if training_args.logging_steps > 0 and cur_step % training_args.logging_steps == 0:
_train_metric = unreplicate(train_metric)
desc = (
f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |"
f" Learning Rate: {_train_metric['learning_rate']} | Time per step: {time_per_step})"
)
epochs.desc = desc
epochs.write(desc)
logger.info(desc)
with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
fp.write(desc + "\n")
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_metric(
summary_writer,
train_metrics,
train_time=train_time,
step=cur_step,
metric_key_prefix="train",
)
# ======================== Evaluating (inside an epoch) ==============================
if (
training_args.do_eval
and (training_args.eval_steps is not None and training_args.eval_steps > 0)
and cur_step % training_args.eval_steps == 0
):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
evaluate(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
# ======================== Epoch End ==============================
# log and save info
            if training_args.logging_steps <= 0:
                # `desc` is only assigned inside the step-based logging block above, so
                # rebuild it here to avoid a NameError when step-based logging is disabled.
                _train_metric = unreplicate(train_metric)
                desc = (
                    f"Epoch... ({epoch + 1}/{num_epochs} | Step: {cur_step} | Loss: {_train_metric['loss']} |"
                    f" Learning Rate: {_train_metric['learning_rate']})"
                )
                logger.info(desc)
                with open(os.path.join(training_args.output_dir, "log"), "a", encoding="UTF-8") as fp:
                    fp.write(desc + "\n")
# Save metrics
if has_tensorboard and jax.process_index() == 0:
write_metric(
summary_writer, train_metrics, train_time=train_time, step=cur_step, metric_key_prefix="train"
)
# ======================== Evaluating (after each epoch) ==============================
if training_args.do_eval and (training_args.eval_steps is None or training_args.eval_steps <= 0):
ckpt_dir = f"ckpt_epoch_{epoch + 1}_step_{cur_step}"
commit_msg = f"Saving weights and logs of epoch {epoch + 1} - step {cur_step}"
evaluate(input_rng, eval_dataset, ckpt_dir)
save_ckpt(ckpt_dir=ckpt_dir, commit_msg=commit_msg)
# ======================== Evaluating | Predicting ==============================
# Create sampling rng
if input_rng is None:
rng, input_rng = jax.random.split(rng)
# run evaluation without training
if training_args.do_eval and not training_args.do_train:
evaluate(input_rng, eval_dataset)
# run prediction after (or without) training
if training_args.do_predict:
predict(input_rng, predict_dataset)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
774afe9bfbd6e0fc09cc788a6fa5e93094dfbc0c | 6c137e70bb6b1b618fbbceddaeb74416d387520f | /spyre/spyre/spyrelets/freqSweep_spyrelet.py | c88d864d47ca403ea0d31e6f4980f09a030b1c1e | [
"BSD-2-Clause"
] | permissive | zhong-lab/code | fe497c75662f8c3b7ab3c01e7e351bff6d5e8d15 | b810362e06b44387f0768353c602ec5d29b551a2 | refs/heads/master | 2023-01-28T09:46:01.448833 | 2022-06-12T22:53:47 | 2022-06-12T22:53:47 | 184,670,765 | 2 | 7 | BSD-2-Clause | 2022-12-08T21:46:15 | 2019-05-02T23:37:39 | Python | UTF-8 | Python | false | false | 10,111 | py | import numpy as np
import pyqtgraph as pg
import time
import random
import matplotlib.pyplot as plt
from PyQt5.Qsci import QsciScintilla, QsciLexerPython
from spyre import Spyrelet, Task, Element
from spyre.widgets.task import TaskWidget
from spyre.plotting import LinePlotWidget,HeatmapPlotWidget
from spyre.widgets.rangespace import Rangespace
from spyre.widgets.param_widget import ParamWidget
from spyre.widgets.repository_widget import RepositoryWidget
from lantz import Q_
import time
import os
from lantz.drivers.spectrum import MS2721B
from lantz.drivers.mwsource import SynthNVPro
from lantz.log import log_to_screen, DEBUG
volt = Q_(1, 'V')
milivolt = Q_(1, 'mV')
Hz = Q_(1, 'Hz')
kHz=Q_(1,'kHz')
MHz = Q_(1.0,'MHz')
dB = Q_(1,'dB')
dBm = Q_(1,'dB')
channel=1
freq_low=5070
freq_size=0.02
freq_list=[]
S12_list=[]
x_count=1000
y_count=1
power=-40
class Sweep(Spyrelet):
requires = {
'analyzer': MS2721B,
'source': SynthNVPro
}
qutag = None
@Task()
def set_analyzer_freq(self):
self.dataset.clear()
log_to_screen(DEBUG)
analyzer_freq_params = self.Analyzer_Frequency_Settings.widget.get()
span = analyzer_freq_params['frequency span']
center = analyzer_freq_params['center freq']
self.analyzer.freq_span = span
self.analyzer.freq_cent = center
print('Setting frequency done!')
@set_analyzer_freq.initializer
def initialize(self):
return
@set_analyzer_freq.finalizer
def finalize(self):
return
@Task()
def set_analyzer_amp(self):
self.dataset.clear()
log_to_screen(DEBUG)
analyzer_amp_params = self.Analyzer_Amplitude_Settings.widget.get()
ref = analyzer_amp_params['ref level']
scale = analyzer_amp_params['scale']
self.analyzer.ref_level = ref*dBm
self.analyzer.Y_scale = scale*dBm
@set_analyzer_amp.initializer
def initialize(self):
print('set_amp initialize')
print('idn: {}'.format(self.analyzer.idn))
return
@set_analyzer_amp.finalizer
def finalize(self):
print('set_amp finalize')
return
@Task()
def set_analyzer_marker(self):
self.dataset.clear()
log_to_screen(DEBUG)
analyzer_marker_params = self.Analyzer_Marker_Settings.widget.get()
chnl = analyzer_marker_params['channel']
stat = analyzer_marker_params['state']
self.analyzer.marker[chnl] = stat
@set_analyzer_marker.initializer
def initialize(self):
return
@set_analyzer_marker.finalizer
def finalize(self):
return
@Task()
def sweep_frequency(self):
self.dataset.clear()
log_to_screen(DEBUG)
sweep_frequency_params = self.Sweep_frequency_Settings.widget.get()
chnl = sweep_frequency_params['marker channel']
mk_freq = sweep_frequency_params['marker frequency']
fr_low = sweep_frequency_params['start frequency']
fr_high = sweep_frequency_params['stop frequency']
fr_stp = sweep_frequency_params['step']
t_stp = sweep_frequency_params['step time']
pw = sweep_frequency_params['sweep power']
name = sweep_frequency_params['txt name']
self.analyzer.marker[chnl] = 'ON'
self.analyzer.marker_X[chnl] = mk_freq
self.source.sweep_lower=fr_low
self.source.sweep_upper=fr_high
self.source.sweep_size=fr_stp
self.source.sweep_step_time=t_stp
self.source.power=pw
self.source.output=1
self.source.sweep_run=1
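        # Poll until the synthesizer clears its hardware sweep flag, logging the analyzer
        # marker amplitude against the current source frequency on every pass.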
        while int(self.source.sweep_run) == 1:
power=self.analyzer.marker_Y[chnl].magnitude
frequency=self.source.frequency.magnitude
with open('D:/MW data/test/20190813/JTWPA/scan_1/{}.txt'.format(name),'a') as file:
write_str='%f %f\n'%(frequency,power)
file.write(write_str)
return
@sweep_frequency.initializer
def initialize(self):
return
@sweep_frequency.finalizer
def finalize(self):
return
@Task()
def sweep_power_frequency(self):
self.dataset.clear()
log_to_screen(DEBUG)
sweep_pw_fr_params = self.Sweep_Power_and_Frequency_Settings.widget.get()
chnl = sweep_pw_fr_params['marker channel']
mk_freq = sweep_pw_fr_params['marker frequency']
p_low = sweep_pw_fr_params['start power']
p_high = sweep_pw_fr_params['stop power']
p_stp = sweep_pw_fr_params['step power']
fr_low = sweep_pw_fr_params['start frequency']
fr_high = sweep_pw_fr_params['stop frequency']
fr_stp = sweep_pw_fr_params['step frequency']
stp_t = sweep_pw_fr_params['step time']
name = sweep_pw_fr_params['txt name']
self.analyzer.marker[chnl] = 'ON'
self.analyzer.marker_X[chnl] = mk_freq
self.source.sweep_lower=fr_low
self.source.sweep_upper=fr_high
self.source.sweep_size=fr_stp
self.source.sweep_step_time=stp_t
self.source.output=1
pw_count=(p_high-p_low)/p_stp
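        # Step through power set-points; note the int() truncation below means the stop
        # power itself is only measured when (p_high - p_low) is an exact multiple of p_stp.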
for pw_point in range(int(pw_count)):
pw_current_value=p_low+pw_point*p_stp
self.source.sweep_power_low=pw_current_value
self.source.sweep_power_high=pw_current_value
self.source.sweep_run=1
            while int(self.source.sweep_run) == 1:
S=self.analyzer.marker_Y[chnl].magnitude
frequency=self.source.frequency.magnitude
power=float(self.source.power)
with open('D:/MW data/test/20190805/power sweep/{}.txt'.format(name),'a') as file:
write_str='%f %f %f\n'%(frequency,power,S)
file.write(write_str)
time.sleep(0.2)
return
@sweep_power_frequency.initializer
def initialize(self):
return
@sweep_power_frequency.finalizer
def finalize(self):
return
@Task()
def set_source_freq(self):
self.dataset.clear()
log_to_screen(DEBUG)
source_freq_params = self.Source_Frequency_Settings.widget.get()
stat = source_freq_params['output state']
freq = source_freq_params['frequency']
self.source.output=stat
self.source.frequency=freq
print('Setting frequency done!')
@set_source_freq.initializer
def initialize(self):
return
@set_source_freq.finalizer
def finalize(self):
return
@Task()
def set_source_stb(self):
self.dataset.clear()
log_to_screen(DEBUG)
source_stb_params = self.Source_Stability_Settings.widget.get()
pll = source_stb_params['pll pump current']
spc = source_stb_params['channel spacing']
self.source.PLL_charge_pump_current=pll
self.source.channel_spacing=spc
    @set_source_stb.initializer
    def initialize(self):
        return
    @set_source_stb.finalizer
    def finalize(self):
        return
@Element()
def Analyzer_Frequency_Settings(self):
analyzer_freq_params = [
('frequency span', {'type': float, 'default': 3000, 'units': 'Hz'}),
('center freq', {'type': float, 'default': 30000000, 'units': 'Hz'}),
]
w = ParamWidget(analyzer_freq_params)
return w
@Element()
def Analyzer_Amplitude_Settings(self):
analyzer_amp_params = [
('ref level', {'type': float, 'default': 0}),
('scale', {'type': float, 'default': 0}),
]
w = ParamWidget(analyzer_amp_params)
return w
@Element()
def Analyzer_Marker_Settings(self):
analyzer_marker_params = [
('channel', {'type': int, 'default': 1}),
('state', {'type': str, 'default': 'OFF'}),
]
w = ParamWidget(analyzer_marker_params)
return w
@Element()
def Source_Frequency_Settings(self):
source_freq_params = [
('output state', {'type': int, 'default': 0}),
('frequency', {'type': float, 'default': 200,'units': 'MHz'}),
]
w = ParamWidget(source_freq_params)
return w
@Element()
def Source_Stability_Settings(self):
source_stb_params = [
('pll pump current', {'type': int, 'default': 5}),
('channel spacing', {'type': float, 'default': 100,'units': 'Hz'}),
]
w = ParamWidget(source_stb_params)
return w
@Element()
def Sweep_frequency_Settings(self):
sweep_freq_params = [
('start frequency', {'type': float, 'default': 20,'units':'MHz'}),
('stop frequency', {'type': float, 'default': 40,'units': 'MHz'}),
('step', {'type': float, 'default': 1,'units': 'MHz'}),
('step time', {'type': float, 'default': 1,'units': 'ms'}),
('sweep power', {'type': float, 'default': 0}),
('measure times', {'type': int, 'default': 3}),
('marker channel', {'type': int, 'default': 1}),
('marker frequency', {'type': float, 'default': 30,'units':'MHz'}),
('txt name', {'type': str, 'default': '11'}),
]
w = ParamWidget(sweep_freq_params)
return w
@Element()
def Sweep_Power_and_Frequency_Settings(self):
sweep_pw_fr_params = [
('start frequency', {'type': float, 'default': 20,'units':'MHz'}),
('stop frequency', {'type': float, 'default': 40,'units': 'MHz'}),
('step frequency', {'type': float, 'default': 1,'units':'MHz'}),
('start power', {'type': float, 'default': -10}),
('stop power', {'type': float, 'default': 8}),
('step power', {'type': float, 'default': 0.1}),
('step time', {'type': float, 'default': 1,'units': 'ms'}),
('marker channel', {'type': int, 'default': 1}),
('marker frequency', {'type': float, 'default': 30,'units':'MHz'}),
('txt name', {'type': str, 'default': '11'}),
]
w = ParamWidget(sweep_pw_fr_params)
return w | [
"none"
] | none |
aa044018408c1b28331bd6260ab341a84fc5a936 | bc167f434158921bcf2c678155c5cdfec1c9b0c9 | /PI_code/simulator/behaviourGeneration/buildBehaviour.py~ | 61fb21ba3d039ecacb623931e2bf58e4efd62509 | [] | no_license | s0217391/DifferentProjects | 6450efc89c64ecd21b86c705737e89e5c69433a6 | 7f4da153660817b6cbf72d2e823aa29c0c2f95a9 | refs/heads/master | 2021-01-17T02:58:46.219240 | 2015-05-26T22:45:46 | 2015-05-26T22:45:46 | 34,995,164 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | #!/usr/bin/python
import sys
import expressionBuilder as eb
def writeLine(f, st = '', tabs = 0):
result = ""
for i in range(tabs):
result = result + " "
result = result + st
f.write(result)
f.write('\n')
def startFile(f):
writeLine(f, '#!/usr/bin/python');
writeLine(f, 'import sys');
writeLine(f)
writeLine(f, "def compute(prey, otherHunter, dist):")
def main(argv=None):
for i in range(0, 500):
newscript = open("group/behav" + str(i + 1) + ".py", 'w')
startFile(newscript)
lines = eb.generateCodeBlock(seed = i, minlns = 3, maxlns = 25)
for x in lines:
(line, tabs) = x
writeLine(newscript, line, tabs + 1)
if __name__ == "__main__":
sys.exit(main())
| [
"[email protected]"
] | ||
716b5557af847737363d572821718326db017f6c | e2e9ae72910dd29877de026866a6f13335815ca6 | /prml/kernels/kernel.py | 03ad8127e24cddaca11a6e7edd0c46a111d14bf3 | [] | no_license | zshwuhan/PRML | b39f09e14cd1169ff44e7299b8adfdd3aea2f94d | 497d985f6387fc31d5fe861533cb333e06f80469 | refs/heads/master | 2021-05-14T23:40:27.351492 | 2017-09-21T12:48:46 | 2017-09-21T12:48:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 540 | py | import numpy as np
class Kernel(object):
def _pairwise(self, x, y):
"""
all pairs of x and y
Parameters
----------
x : (sample_size, n_features)
input
y : (sample_size, n_features)
another input
Returns
-------
output : tuple
            two arrays, each of shape (len(x), len(y), n_features)
"""
return (
np.tile(x, (len(y), 1, 1)).transpose(1, 0, 2),
np.tile(y, (len(x), 1, 1))
)
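# Illustrative (not part of the original file): for x of shape (2, d) and y of shape
# (3, d), _pairwise returns two (2, 3, d) arrays with out0[i, j] == x[i] and
# out1[i, j] == y[j], ready for elementwise kernel evaluation.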
| [
"[email protected]"
] | |
82461d66890b1704b59ea9a59d8c4729e2e080b8 | f085af63a93cb12feca75a3d9e855c3373d2b78e | /dynamic_programming/longest_nondecreasing_subsequence_length.py | ec8cd8dc441a12123420e33dfbc9572f6318d970 | [] | no_license | zjxpirate/Daily-Upload-Python | 9542f1a3491ac5c843bc80266523bc06c37be20e | d5efcfdaf7e632e1f0cb8b21c505c0c0a5325eb0 | refs/heads/master | 2020-04-03T08:10:55.667410 | 2019-06-27T01:15:36 | 2019-06-27T01:15:36 | 155,124,951 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py |
# 15. find the longest nondecreasing subsequence
list1 = [0, 8, 4, 12, 2, 10, 6, 14, 1, 9]
def longest_nondecreasing_subsequence_length(A):
# max_length[i] holds the length of the longest nondecreasing subsequence of A[:i + 1].
max_length = [1] * len(A)
for i in range(1, len(A)):
max_length[i] = max(1 + max((max_length[j] for j in range(i) if A[i] >= A[j]), default=0), max_length[i])
return max(max_length)
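# O(n^2) DP: max_length[i] extends the best nondecreasing chain ending at any j < i with
# A[j] <= A[i]. For list1 above this prints 4 (one witness: 0, 4, 10, 14).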
print(longest_nondecreasing_subsequence_length(list1))
| [
"[email protected]"
] | |
2482542d358ddfb289a586947cba5d91d129a318 | f445450ac693b466ca20b42f1ac82071d32dd991 | /generated_tempdir_2019_09_15_163300/generated_part007021.py | 2025aafae69c6640949ff7fe65450879584c3cdd | [] | no_license | Upabjojr/rubi_generated | 76e43cbafe70b4e1516fb761cabd9e5257691374 | cd35e9e51722b04fb159ada3d5811d62a423e429 | refs/heads/master | 2020-07-25T17:26:19.227918 | 2019-09-15T15:41:48 | 2019-09-15T15:41:48 | 208,357,412 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,300 | py | from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher137075(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i2.3.1.2.1.0', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher137075._instance is None:
CommutativeMatcher137075._instance = CommutativeMatcher137075()
return CommutativeMatcher137075._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 137074
return
yield
from collections import deque | [
"[email protected]"
] | |
617571ece63ec1c75aaac8ba508201a4956ff656 | ce741ade3d7ebfc64cf2736358f6e77b06168830 | /apps/users/models.py | 548f619885ca2197b733f8ec22122e00804d4162 | [] | no_license | Erick-LONG/MxShop | 798a1ce4eb557973732ee6206640bdf9a247216b | 783e5d66a4d49b3eceb3eb6d7c729fcfa69742cb | refs/heads/master | 2021-04-03T08:31:41.588749 | 2018-03-22T04:01:46 | 2018-03-22T04:01:46 | 124,395,167 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | from datetime import datetime
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class UserProfile(AbstractUser):
'''用户'''
name = models.CharField(max_length=30,null=True,blank=True,verbose_name='姓名')
birthday = models.DateField(null=True,blank=True,verbose_name='出生年月')
mobile = models.CharField(null=True,blank=True,max_length=11,verbose_name='电话')
gender = models.CharField(max_length=6,choices=(('male','男'),('female','女'),),default='female',verbose_name='性别')
email = models.EmailField(max_length=100,null=True,blank=True,verbose_name='邮箱')
class Meta:
verbose_name = '用户'
verbose_name_plural = verbose_name
def __str__(self):
return self.username
class VerifyCode(models.Model):
    '''SMS verification code'''
code = models.CharField(max_length=10,verbose_name='验证码')
mobile = models.CharField(max_length=11, verbose_name='电话')
add_time = models.DateTimeField(default=datetime.now,verbose_name='添加时间')
class Meta:
verbose_name = '短信验证码'
verbose_name_plural = verbose_name
def __str__(self):
return self.code | [
"[email protected]"
] | |
60f10d228169389471da351b6d96d2bffe92e6f0 | 2d27360e2038546a38746912fa75dbde8667ee61 | /make_dogC.py | cc4a3b7215091b543f863bdc11c36a5f125ffa5e | [
"MIT"
] | permissive | matteoferla/DogCatcher | 234353eb0e2f8177e59314e62d901b13bed1a265 | ff7edb88b73aa0585d8f6528ccfc22939c934fb5 | refs/heads/master | 2023-05-07T18:16:58.632024 | 2021-06-02T07:23:35 | 2021-06-02T07:23:35 | 257,064,986 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,538 | py | from model_maker import Catcher, pyrosetta
import json
pymol = pyrosetta.PyMOLMover()
dogC = Catcher(lyx=9, asx=121, glh=70, asx_type='ASN', cut_resi=105, other_res=['WAT'],
params_folder='params',
iso_constraint_file='constraints/iso.dogC.cst',
trans_constraint_file='constraints/ASA-LYX.dogC.cst')
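# The numbered arguments presumably identify the reactive-triad residues (Lys9, Asn121,
# Glu70 in this numbering) and the split point; the constraint files hold the isopeptide
# geometry together during minimisation.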
## Starting pose
print('Starting pose')
#pose = dogC.load_pose_from_file('data/RrgA.altered.pdb')
pose = dogC.load_pose_from_file('../RrgA.relaxed.pdb')
pymol.pymol_name('init')
pymol.apply(pose)
# dogC.relax_with_ED(pose, 'data/2ww8.ccp4')
pymol.apply(pose)
logbook = {}
s = dogC.get_score_panel(pose, save_variants=True, filename='models/00_initial')
s['description'] = 'PDB:2WW8 734-860 energy minimised against CCP4 map'
logbook['native'] = s
json.dump(logbook, open('scores.json', 'w'))
# G109T
print('G109T')
G109T = dogC.make_mutant(pose, 'G109T')
s = dogC.get_score_panel(G109T, save_variants=True, filename='models/01a_G109T')
s['description'] = 'PDB:2WW8 734-860 G109T'
logbook['G109T'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('G109T')
pymol.apply(G109T)
# N115G
print('N115G')
N115G = dogC.make_mutant(pose, 'N115G')
s = dogC.get_score_panel(N115G, save_variants=True, filename='models/01b_N115G')
s['description'] = 'PDB:2WW8 734-860 N115G'
logbook['N115G'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('N115G')
pymol.apply(N115G)
# G109T N115G
print('G109T N115G')
base = dogC.make_mutant(G109T, 'N115G')
s = dogC.get_score_panel(base, save_variants=True, filename='models/02_dogC')
s['description'] = 'PDB:2WW8 734-860 G109T N115G "DogC"'
logbook['dogC'] = s
json.dump(logbook, open('scores.json', 'w'))
pymol.pymol_name('DogC')
pymol.apply(base)
# A75P
print('A75P')
A75P = dogC.make_mutant(base, 'A75P')
dogC.relax_loop(A75P, 73, 80)
s = dogC.get_score_panel(A75P, save_variants=True, filename='models/03_A75P')
s['description'] = 'A75P'
logbook['A75P'] = s
json.dump(logbook, open('scores.json', 'w'))
pair_A = dogC.make_double_mutant(A75P, ['N11D', 'N13T'])
s = dogC.get_score_panel(pair_A, save_variants=True, filename='models/04a_N11D_N13T')
s['description'] = 'N11D N13T A75P'
logbook['N11D N13T'] = s
pair_B = dogC.make_double_mutant(A75P, ['D4E', 'K59T'])
s = dogC.get_score_panel(pair_B, save_variants=True, filename='models/04b_D4E_K59T')
s['description'] = 'D4E K59T A75P'
logbook['D4E K59T'] = s
pair_C = dogC.make_double_mutant(A75P, ['A87E', 'I101A'])
s = dogC.get_score_panel(pair_C, save_variants=True, filename='models/04c_A87E_I101A')
s['description'] = 'A75P A87E I101A'
logbook['A87E I101A'] = s
quad = dogC.make_double_mutant(pair_A, ['D4E', 'K59T'])
s = dogC.get_score_panel(quad, save_variants=True, filename='models/05_D4E_N11D_N13T_K59T')
s['description'] = 'D4E N11D N13T K59T'
logbook['D4E N11D N13T K59T A75P'] = s
for letter, resi in (('d', 'A38P'), ('e','Y45G'), ('f','N47D'), ('g','N92D'), ('h','A87E')):
x = dogC.make_mutant(A75P, resi)
s = dogC.get_score_panel(x, save_variants=True, filename=f'models/04{letter}_{resi}')
s['description'] = f'A75P {resi}'
logbook[resi] = s
json.dump(logbook, open('scores.json', 'w'))
pair_D = dogC.make_double_mutant(A75P, ['A87E', 'I101A'])
s = dogC.get_score_panel(pair_D, save_variants=True, filename='models/04i_N47D_N92D')
s['description'] = 'N47D A75P N92D'
logbook['N47D A75P N92D'] = s
aqua = dogC.make_double_mutant(quad, ['N92D', 'N47D'])
s = dogC.get_score_panel(aqua, save_variants=True, filename='models/06_N47D_N92D')
s['description'] = 'D4E N11D N13T N47D A75P K59T N92D'
logbook['D4E N11D N13T N47D A75P K59T N92D'] = s
F69I = dogC.make_mutant(aqua, 'F69I')
s = dogC.get_score_panel(F69I, save_variants=True, filename='models/07a_F69I')
s['description'] = '+ F69I'
logbook['F69I'] = s
json.dump(logbook, open('scores.json', 'w'))
Q89R = dogC.make_mutant(aqua, 'Q89R')
s = dogC.get_score_panel(Q89R, save_variants=True, filename='models/07b_Q89R')
s['description'] = '+ Q89R'
logbook['Q89R'] = s
json.dump(logbook, open('scores.json', 'w'))
A87S = dogC.make_mutant(aqua, 'A87S')
s = dogC.get_score_panel(A87S, save_variants=True, filename='models/07c_A87S')
s['description'] = '+ A87S'
logbook['A87S'] = s
json.dump(logbook, open('scores.json', 'w'))
phage = dogC.make_double_mutant(aqua, ['Q89R', 'A87S', 'F69I'])
s = dogC.get_score_panel(phage, save_variants=True, filename='models/08_F69I_A87S_Q89R')
s['description'] = '+ F69I A87S Q89R'
logbook['F69I A87S Q89R'] = s
json.dump(logbook, open('scores.json', 'w')) | [
"[email protected]"
] | |
e0d8b0932a51cce603529841b3292b2ad1ba6353 | 38c10c01007624cd2056884f25e0d6ab85442194 | /chrome/browser/resources/settings/controls/compiled_resources.gyp | 75cbbb52ea0a4979d02b08222e3564aa9cac2829 | [
"BSD-3-Clause"
] | permissive | zenoalbisser/chromium | 6ecf37b6c030c84f1b26282bc4ef95769c62a9b2 | e71f21b9b4b9b839f5093301974a45545dad2691 | refs/heads/master | 2022-12-25T14:23:18.568575 | 2016-07-14T21:49:52 | 2016-07-23T08:02:51 | 63,980,627 | 0 | 2 | BSD-3-Clause | 2022-12-12T12:43:41 | 2016-07-22T20:14:04 | null | UTF-8 | Python | false | false | 1,932 | gyp | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'settings_checkbox',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
'../../../../../ui/webui/resources/js/compiled_resources.gyp:load_time_data',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_indicator_behavior.js',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_pref_behavior.js',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
{
'target_name': 'settings_input',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
'../../../../../ui/webui/resources/js/compiled_resources.gyp:load_time_data',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_indicator_behavior.js',
'../../../../../ui/webui/resources/cr_elements/policy/cr_policy_pref_behavior.js',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
{
'target_name': 'settings_radio_group',
'variables': {
'depends': [
'../../../../../ui/webui/resources/js/compiled_resources.gyp:assert',
],
'externs': [
'../../../../../third_party/closure_compiler/externs/settings_private.js'
],
},
'includes': ['../../../../../third_party/closure_compiler/compile_js.gypi'],
},
],
}
| [
"[email protected]"
] | |
65b8f955f5a177896154f778e3d5a466193e38b0 | 2352bc07e12b0256913559cf3485a360569ccd5e | /Practice/code_class/Crossin-practices/python-cocos2d/practice.py | 03ae4f527a72db206c7f6622f7b49d67d17d3b0c | [] | no_license | Dis-count/Python_practice | 166ae563be7f6d99a12bdc0e221c550ef37bd4fd | fa0cae54e853157a1d2d78bf90408c68ce617c1a | refs/heads/master | 2022-12-12T03:38:24.091529 | 2021-12-22T09:51:59 | 2021-12-22T09:51:59 | 224,171,833 | 2 | 1 | null | 2022-12-08T05:29:38 | 2019-11-26T11:07:00 | Jupyter Notebook | UTF-8 | Python | false | false | 1,050 | py | # -*- coding: utf-8 -*-
import cocos
import random
class Testgame(cocos.layer.Layer):
# pass
def __init__(self):
super(Testgame,self).__init__()
# self.logo = cocos.sprite.Sprite()
# self.logo.position = 550,400
# self.add(self.logo,9999)
txt = cocos.text.Label(u'最棒了最棒了')
txt.position = 300,200
self.add(txt)
self.ppx = cocos.sprite.Sprite('ppx_rush1.png')
self.ppx.position = 200,300
self.add(self.ppx)
self.speed_x = 3
self.speed_y = 3
self.schedule(self.update)
def update(self,dt):
self.ppx.x += self.speed_x
if self.ppx.x > 600:
self.speed_x = -(1+4*random.random())
elif self.ppx.x < 0:
self.speed_x = 3
self.ppx.y += self.speed_y
if self.ppx.y > 480:
self.speed_y = -3
        elif self.ppx.y < 0:
self.speed_y = 3
cocos.director.director.init(caption=u'测试')
cocos.director.director.run(cocos.scene.Scene(Testgame()))
| [
"[email protected]"
] | |
98e13b59b62ddedc3ca4697d2082d3d67f574de7 | eb0fc861564058487117325298eccce468f6ceb8 | /yo/services/notification_sender/cli.py | adae1c7d8b43e370f382cb7e84ec7aa0569d7141 | [
"MIT"
] | permissive | dpays/dpay-notifications | 239c92243ae53c485bfa44bb7b7e344203645241 | 32b1cdcd58d622407fd50206053c5b9735a56ba9 | refs/heads/master | 2020-03-26T21:19:22.793303 | 2018-09-08T04:26:12 | 2018-09-08T04:26:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | # -*- coding: utf-8 -*-
import click
import yo.yolog
@click.command(name='sender')
@click.option('--database_url', envvar='DATABASE_URL')
def yo_noitification_sender_service(database_url):
from yo.services.notification_sender import main_task
main_task(database_url=database_url)
if __name__ == '__main__':
yo_noitification_sender_service()
| [
"[email protected]"
] | |
c11ddbb3c8c40a72606cae4a86c24ef46d3da507 | acf426a78ded4a078063d05457075fedba8f5310 | /mn_wifi/sumo/traci/_inductionloop.py | 47c322177a8a427c3fa85e06cc1e7b7947b2c0bb | [
"LicenseRef-scancode-x11-stanford"
] | permissive | intrig-unicamp/mininet-wifi | 3b58e6cf7b422cfe0f8990e173e77d7ba1d54616 | 985bf0ca2f11ca2ba17e44518e0df550070ddfba | refs/heads/master | 2023-08-27T03:36:41.005380 | 2023-07-27T13:07:32 | 2023-07-27T13:07:32 | 35,002,369 | 419 | 278 | NOASSERTION | 2023-09-12T03:42:45 | 2015-05-03T22:03:07 | Python | UTF-8 | Python | false | false | 4,605 | py | # -*- coding: utf-8 -*-
# Eclipse SUMO, Simulation of Urban MObility; see https://eclipse.org/sumo
# Copyright (C) 2011-2017 German Aerospace Center (DLR) and others.
# This program and the accompanying materials
# are made available under the terms of the Eclipse Public License v2.0
# which accompanies this distribution, and is available at
# http://www.eclipse.org/legal/epl-v20.html
# @file _inductionloop.py
# @author Michael Behrisch
# @author Daniel Krajzewicz
# @date 2011-03-16
# @version $Id$
from __future__ import absolute_import
from .domain import Domain
from .storage import Storage
from . import constants as tc
def readVehicleData(result):
result.readLength()
nbData = result.readDouble()
data = []
    for i in range(int(nbData)):  # the count is read as a double, so coerce it for range()
result.read("!B")
vehID = result.readString()
result.read("!B")
length = result.readDouble()
result.read("!B")
entryTime = result.readDouble()
result.read("!B")
leaveTime = result.readDouble()
result.read("!B")
typeID = result.readString()
data.append([vehID, length, entryTime, leaveTime, typeID])
return data
_RETURN_VALUE_FUNC = {tc.VAR_POSITION: Storage.readDouble,
tc.VAR_LANE_ID: Storage.readString,
tc.LAST_STEP_VEHICLE_NUMBER: Storage.readDouble,
tc.LAST_STEP_MEAN_SPEED: Storage.readDouble,
tc.LAST_STEP_VEHICLE_ID_LIST: Storage.readStringList,
tc.LAST_STEP_OCCUPANCY: Storage.readDouble,
tc.LAST_STEP_LENGTH: Storage.readDouble,
tc.LAST_STEP_TIME_SINCE_DETECTION: Storage.readDouble,
tc.LAST_STEP_VEHICLE_DATA: readVehicleData}
class InductionLoopDomain(Domain):
def __init__(self):
Domain.__init__(self, "inductionloop", tc.CMD_GET_INDUCTIONLOOP_VARIABLE, None,
tc.CMD_SUBSCRIBE_INDUCTIONLOOP_VARIABLE, tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_VARIABLE,
tc.CMD_SUBSCRIBE_INDUCTIONLOOP_CONTEXT, tc.RESPONSE_SUBSCRIBE_INDUCTIONLOOP_CONTEXT,
_RETURN_VALUE_FUNC)
def getPosition(self, loopID):
"""getPosition(string) -> double
Returns the position measured from the beginning of the lane in meters.
"""
return self._getUniversal(tc.VAR_POSITION, loopID)
def getLaneID(self, loopID):
"""getLaneID(string) -> string
Returns the id of the lane the loop is on.
"""
return self._getUniversal(tc.VAR_LANE_ID, loopID)
def getLastStepVehicleNumber(self, loopID):
"""getLastStepVehicleNumber(string) -> integer
Returns the number of vehicles that were on the named induction loop within the last simulation step.
"""
return self._getUniversal(tc.LAST_STEP_VEHICLE_NUMBER, loopID)
def getLastStepMeanSpeed(self, loopID):
"""getLastStepMeanSpeed(string) -> double
Returns the mean speed in m/s of vehicles that were on the named induction loop within the last simulation step.
"""
return self._getUniversal(tc.LAST_STEP_MEAN_SPEED, loopID)
def getLastStepVehicleIDs(self, loopID):
"""getLastStepVehicleIDs(string) -> list(string)
Returns the list of ids of vehicles that were on the named induction loop in the last simulation step.
"""
return self._getUniversal(tc.LAST_STEP_VEHICLE_ID_LIST, loopID)
def getLastStepOccupancy(self, loopID):
"""getLastStepOccupancy(string) -> double
Returns the percentage of time the detector was occupied by a vehicle.
"""
return self._getUniversal(tc.LAST_STEP_OCCUPANCY, loopID)
def getLastStepMeanLength(self, loopID):
"""getLastStepMeanLength(string) -> double
Returns the mean length in m of vehicles which were on the detector in the last step.
"""
return self._getUniversal(tc.LAST_STEP_LENGTH, loopID)
def getTimeSinceDetection(self, loopID):
"""getTimeSinceDetection(string) -> double
Returns the time in s since last detection.
"""
return self._getUniversal(tc.LAST_STEP_TIME_SINCE_DETECTION, loopID)
def getVehicleData(self, loopID):
"""getVehicleData(string) -> [(veh_id, veh_length, entry_time, exit_time, vType), ...]
Returns a complex structure containing several information about vehicles which passed the detector.
"""
return self._getUniversal(tc.LAST_STEP_VEHICLE_DATA, loopID)
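# Illustrative usage sketch (hypothetical detector id "det0" and config file),
# once a SUMO simulation is running under TraCI:
#
#   import traci
#   traci.start(["sumo", "-c", "scenario.sumocfg"])
#   traci.simulationStep()
#   print(traci.inductionloop.getLastStepVehicleNumber("det0"))
#   print(traci.inductionloop.getVehicleData("det0"))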
InductionLoopDomain() | [
"[email protected]"
] | |
eb7e5849b5f2b010cd504a91f7d1992192d5f1ef | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2634/48102/248806.py | 7d3c7140dc5506ab531b4029d5a8e97dfc1a1732 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | def compare(a):
    return a[0] / a[1]  # sort key: each pair is ordered by the ratio of its elements
def search(ls: list, target: int) -> list:
res = []
for i in range(len(ls)-1):
for j in range(i+1, len(ls)):
res.append([ls[i], ls[j]])
res.sort(key=compare)
return res[target-1]
lst = eval(input())
t = int(input())
print(search(lst, t)) | [
"[email protected]"
] | |
6ebdf216116dcd30b8c2d256c9d9e25c1236a86e | d0533b0574494b13606a557620f38f5a2c74ce16 | /venv/lib/python3.7/site-packages/mongoengine/base/common.py | 82d2441786f944d1b4b33738eef42a64caa091a5 | [
"GPL-1.0-or-later",
"MIT",
"LicenseRef-scancode-other-copyleft"
] | permissive | CatTiger/vnpy | af889666464ab661fb30fdb0e8f71f94ba2d1e41 | 7901a0fb80a5b44d6fc752bd4b2b64ec62c8f84b | refs/heads/master | 2020-09-26T00:37:54.123877 | 2020-07-13T10:15:46 | 2020-07-13T10:15:46 | 226,124,078 | 0 | 0 | MIT | 2020-04-21T03:02:20 | 2019-12-05T14:44:55 | C++ | UTF-8 | Python | false | false | 1,492 | py | from mongoengine.errors import NotRegistered
__all__ = ('UPDATE_OPERATORS', 'get_document', '_document_registry')
UPDATE_OPERATORS = {'set', 'unset', 'inc', 'dec', 'mul',
'pop', 'push', 'push_all', 'pull',
'pull_all', 'add_to_set', 'set_on_insert',
'min', 'max', 'rename'}
_document_registry = {}
def get_document(name):
"""Get a registered Document class by name."""
doc = _document_registry.get(name, None)
if not doc:
# Possible old style name
single_end = name.split('.')[-1]
compound_end = '.%s' % single_end
possible_match = [k for k in _document_registry
if k.endswith(compound_end) or k == single_end]
if len(possible_match) == 1:
doc = _document_registry.get(possible_match.pop(), None)
if not doc:
raise NotRegistered("""
`%s` has not been registered in the document registry.
Importing the document class automatically registers it, has it
been imported?
""".strip() % name)
return doc
def _get_documents_by_db(connection_alias, default_connection_alias):
"""Get all registered Documents class attached to a given database"""
def get_doc_alias(doc_cls):
return doc_cls._meta.get('db_alias', default_connection_alias)
return [doc_cls for doc_cls in list(_document_registry.values())
if get_doc_alias(doc_cls) == connection_alias]
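# Illustrative sketch (hypothetical Document subclass): importing a model module
# is what populates _document_registry, after which lookups succeed:
#
#   from mongoengine import Document
#   class BlogPost(Document):
#       pass
#   get_document('BlogPost')         # -> BlogPost
#   get_document('models.BlogPost')  # old-style dotted names also resolve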
| [
"[email protected]"
] | |
a8fd073471237610101d66b5b5288bd5c2f35af1 | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/devtestlab/__init__.py | 7c60ecc2f02f80c2494ffe58a8560274e9866950 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 282 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Make subpackages available:
from . import (
latest,
v20150521preview,
v20160515,
v20180915,
)
| [
"[email protected]"
] | |
b480f0d4a79df1e543ab8471ddd8b32a713f46a5 | a61f73dd4cfd8d863e566f6b7422e1637967a1d7 | /abci/example/python3/abci/wire.py | 72f5fab8b0b227b6282599fe053b95fb0b57f555 | [
"Apache-2.0"
] | permissive | FirmaChain/tendermint | e1d91ee4c17f908c9f07d0771621201e9552e81f | aaa060fda4e3a564a32f1ba81f05cea93f6e34ce | refs/heads/master | 2020-08-13T07:36:03.482612 | 2019-10-11T16:07:58 | 2019-10-11T16:07:58 | 214,933,104 | 14 | 0 | Apache-2.0 | 2019-10-14T02:43:54 | 2019-10-14T02:43:53 | null | UTF-8 | Python | false | false | 2,599 | py |
# the decoder works off a reader
# the encoder returns bytearray
def hex2bytes(h):
return bytearray(h.decode('hex'))
def bytes2hex(b):
if type(b) in (str, str):
return "".join([hex(ord(c))[2:].zfill(2) for c in b])
else:
return bytes2hex(b.decode())
# expects uvarint64 (no crazy big nums!)
def uvarint_size(i):
if i == 0:
return 0
for j in range(1, 8):
if i < 1 << j * 8:
return j
return 8
# expects i < 2**size
def encode_big_endian(i, size):
if size == 0:
return bytearray()
return encode_big_endian(i // 256, size - 1) + bytearray([i % 256])
def decode_big_endian(reader, size):
if size == 0:
return 0
firstByte = reader.read(1)[0]
return firstByte * (256 ** (size - 1)) + decode_big_endian(reader, size - 1)
# ints are max 16 bytes long
def encode_varint(i):
negate = False
if i < 0:
negate = True
i = -i
size = uvarint_size(i)
if size == 0:
return bytearray([0])
big_end = encode_big_endian(i, size)
if negate:
size += 0xF0
return bytearray([size]) + big_end
# returns the int and whats left of the byte array
def decode_varint(reader):
size = reader.read(1)[0]
if size == 0:
return 0
negate = True if size > int(0xF0) else False
if negate:
size = size - 0xF0
i = decode_big_endian(reader, size)
if negate:
i = i * (-1)
return i
def encode_string(s):
size = encode_varint(len(s))
return size + bytearray(s, 'utf8')
def decode_string(reader):
length = decode_varint(reader)
raw_data = reader.read(length)
return raw_data.decode()
def encode_list(s):
b = bytearray()
list(map(b.extend, list(map(encode, s))))
return encode_varint(len(s)) + b
def encode(s):
print('encoding', repr(s))
if s is None:
return bytearray()
if isinstance(s, int):
return encode_varint(s)
elif isinstance(s, str):
return encode_string(s)
elif isinstance(s, list):
return encode_list(s)
elif isinstance(s, bytearray):
return encode_string(s)
else:
print("UNSUPPORTED TYPE!", type(s), s)
if __name__ == '__main__':
ns = [100, 100, 1000, 256]
ss = [2, 5, 5, 2]
bs = list(map(encode_big_endian, ns, ss))
ds = list(map(decode_big_endian, bs, ss))
print(ns)
print([i[0] for i in ds])
ss = ["abc", "hi there jim", "ok now what"]
e = list(map(encode_string, ss))
d = list(map(decode_string, e))
print(ss)
print([i[0] for i in d])
| [
"[email protected]"
] | |
ee818994979e4463590d325b2c982d6640272e36 | 7332aca74ee82c69a64737668f6aba4a20078b1b | /mysite/settings.py | dba505eed042889de7cf95363039bd098723daab | [] | no_license | bcastillo1/my-first-blog | f1609730a2a69dca94c07b0ed017717ad8218d85 | 4b4f3cfa66f2aff51e8b33805d6dc0e0314afa55 | refs/heads/master | 2020-03-30T07:00:39.662973 | 2018-09-29T22:26:58 | 2018-09-29T22:26:58 | 150,907,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,181 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 2.0.8.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_x1u4*9l1=$7byhrg7yr&7%ozhw7+jne_avltk@9gb^qc8i2qv'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [ '127.0.0.1', '.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static') | [
"[email protected]"
] | |
6ecccd89ce3b40ed429583a7acf5dce755c3a3b2 | 6b265b404d74b09e1b1e3710e8ea872cd50f4263 | /Python/Ising/ising_sim.py | 480aca6617840c8fc9257de0549d3c56f609e818 | [
"CC-BY-4.0"
] | permissive | gjbex/training-material | cdc189469ae2c7d43784ecdcb4bcca10ecbc21ae | e748466a2af9f3388a8b0ed091aa061dbfc752d6 | refs/heads/master | 2023-08-17T11:02:27.322865 | 2023-04-27T14:42:55 | 2023-04-27T14:42:55 | 18,587,808 | 130 | 60 | CC-BY-4.0 | 2023-08-03T07:07:25 | 2014-04-09T06:35:58 | Jupyter Notebook | UTF-8 | Python | false | false | 4,699 | py | #!/usr/bin/env python
if __name__ == '__main__':
from argparse import ArgumentParser
import importlib
import random
import sys
from runner import DomainSizeRunner
from averager import Averager
from util import print_options
    arg_parser = ArgumentParser(description='2D ising system simulation')
arg_parser.add_argument('--N', type=int, default=10,
help='system size, i.e., N x N')
arg_parser.add_argument('--J', type=float, default=1.0,
                            help='spin-spin interaction energy')
arg_parser.add_argument('--H', type=float, default=0.0,
help='magnetic field')
arg_parser.add_argument('--T', default='1.5',
                            help='temperature in units k_b, can be '
'a comma-separated list')
arg_parser.add_argument('--steps', type=int, default=1000,
help='number of simulation steps')
arg_parser.add_argument('--burn_in', type=int, default=100,
help='burn in for averages')
arg_parser.add_argument('--sample_period', type=int, default=10,
help='sample period for averages')
arg_parser.add_argument('--window', type=int, default=20,
help='window size for convergence test')
arg_parser.add_argument('--max_slope', type=float, default=1e-4,
help='maximum slope as convergence criterion')
arg_parser.add_argument('--runs', type=int, default=10,
help='number of independent runs')
arg_parser.add_argument('--file', default='result',
help='output file base name')
arg_parser.add_argument('--verbose', type=int, default=1,
help='show progress information, the '
'higher the value, the more detail')
arg_parser.add_argument('--seed', type=int,
help='seed for random number generator, '
'only needed for development')
arg_parser.add_argument('--python', action='store_true',
help='use pure Python implementation')
options = arg_parser.parse_args()
magn_file = open('{0}-magn.txt'.format(options.file), 'w')
domain_file = open('{0}-domains.txt'.format(options.file), 'w')
if options.seed is None:
seed = random.randint(0, 1000000000)
else:
seed = options.seed
if options.python:
ising_module = importlib.import_module('ising')
else:
ising_module = importlib.import_module('ising_cxx')
hdr_line_fmt = 'T {M:s} {E:s} {deltaE2:s}\n'
hdr_fmt = '{0:s} {0:s}_std {0:s}_min {0:s}_max'
val_line_fmt = '{T:.4f} {M:s} {E:s} {deltaE2:s}\n'
val_fmt = '{mean:.5e} {std:.5e} {min:.5e} {max:.5e}'
M_hdr = hdr_fmt.format('M')
E_hdr = hdr_fmt.format('E')
deltaE2_hdr = hdr_fmt.format('deltaE^2')
magn_file.write(hdr_line_fmt.format(M=M_hdr, E=E_hdr,
deltaE2=deltaE2_hdr))
print_options(magn_file, options)
domain_file.write('T domain_sizes\n')
print_options(domain_file, options)
for T in (float(T_str) for T_str in options.T.split(',')):
if options.verbose > 0:
sys.stderr.write('# computing T = {0:.4f}\n'.format(T))
ising = ising_module.IsingSystem(options.N, options.J, options.H, T)
ising.init_random(seed)
runner = DomainSizeRunner(ising=None, steps=options.steps,
is_verbose=options.verbose - 2,
burn_in=options.burn_in,
sample_period=options.sample_period,
window=options.window)
averager = Averager(runner, ising, is_verbose=options.verbose - 1)
averager.average(options.runs)
M_values = averager.get('M mean')
M_str = val_fmt.format(**M_values)
E_values = averager.get('E mean')
E_str = val_fmt.format(**E_values)
deltaE2_values = averager.get('deltaE^2')
deltaE2_str = val_fmt.format(**deltaE2_values)
magn_file.write(val_line_fmt.format(T=T, M=M_str, E=E_str,
deltaE2=deltaE2_str))
magn_file.flush()
domains = averager.get('domains')
        distribution = ','.join(['{0:d}:{1:.8e}'.format(k, v)
                                 for k, v in domains.items()])
        domain_file.write('{0:.4f} {1:s}\n'.format(T, distribution))
domain_file.flush()
magn_file.close()
domain_file.close()
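# Example run (assuming the pure-Python ising or compiled ising_cxx module is
# importable; file names here are hypothetical):
#   python ising_sim.py --N 20 --T 1.0,1.5,2.0 --steps 2000 --runs 5 --file demo
# which writes averaged magnetization/energy data to demo-magn.txt and the
# domain-size distributions to demo-domains.txt.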
| [
"[email protected]"
] | |
daf6ce6d701a3f7fd5d4d5556db1235f1aea559b | 2d227925231be797cc78b644358ecd3adf00fba7 | /ce/c240.py | 802087ab037d7ba1d123f6f276c8a9eb0c788b26 | [] | no_license | egalli64/pythonesque | 6bb107189d4556d832175d41366ea0b18ed6ea1d | 154042c5ae5cf43a0ae2c03d509fc48d1dc19eb8 | refs/heads/master | 2023-07-07T05:50:14.711023 | 2023-07-01T10:52:24 | 2023-07-01T10:52:24 | 53,720,525 | 21 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,294 | py | """
Mersenne prime
author: Manny [email protected]
info: http://thisthread.blogspot.com/2017/02/codeeval-mersenne-prime.html
https://www.codeeval.com/open_challenges/240/
"""
import sys
from math import sqrt
marsennes = [(2, 3)]
def is_prime(number):
for divisor in range(2, int(sqrt(number)) + 1):
if number % divisor == 0:
return False
return True
def trim(value):
i = 0
while marsennes[i][1] < value:
i += 1
return marsennes[:i]
def get_marsennes(value):
if value <= marsennes[-1][1]:
return trim(value)
index = marsennes[-1][0]
while True:
index += 1
if not is_prime(index):
continue
marsenne = 2**index - 1
marsennes.append((index, marsenne))
if marsenne < value:
continue
return marsennes if marsenne == value else marsennes[:-1]
def solution(line):
value = int(line)
result = get_marsennes(value)
return ', '.join(map(str, [r[1] for r in result]))
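# Illustrative check: solution('100') grows the cache through prime exponents
# 2, 3, 5, 7 and stops at 2**7 - 1 = 127 >= 100, returning '3, 7, 31'.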
if __name__ == '__main__':
if len(sys.argv) == 2:
test_cases = open(sys.argv[1], 'r')
for test in test_cases:
print(solution(test.rstrip('\n')))
test_cases.close()
else:
print('Data filename expected as argument!') | [
"[email protected]"
] | |
efc014f939c2d949b0cd19569ef658a32e5e1fe1 | cb530e68e4151f793b42b84a86e75794e71efbc0 | /containerkeys/middleware.py | 993be599e94d17cbe850e168e41066d55a3807c3 | [
"Apache-2.0"
] | permissive | CloudBrewery/swift-container-keys | 7614ffdf313a8cb90b128be08a4845a438705312 | c6406dbc45858c13bd5cc7935dc81a9129141529 | refs/heads/master | 2021-01-15T11:12:04.166532 | 2015-01-09T17:41:14 | 2015-01-09T17:41:14 | 29,027,338 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,099 | py | # Copyright (c) 2015 Cloud Brewery Inc. (cloudbrewery.io)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Container Keys Middleware
Allows for access to containers based on a simple key rather than requiring
a user to supply Keystone credentials and embed them in their application.
Container keys supports two keys, specifically X-Container-Meta-Full-Key and
X-Container-Meta-Read-Key. Whichever is supplied will be authenticated against.
Read-Key will only accept GET requests, not POST/PUT/DELETE, which would be
supported by Full-Key.
Multiple keys can be set per container, of the following format:
X-Container-Meta-[Full|Read]-Key(-[2-9])?
The maximum number of keys that are checked is configurable, and can be used
to implement multiple valid keys per container, or a key recycling policy.
"""
from collections import defaultdict
from swift.common import utils as swift_utils
from swift.common.swob import HTTPUnauthorized
from swift.proxy.controllers.base import get_container_info
FULL_KEY = 'Full-Key'
READ_KEY = 'Read-Key'
FULL_KEY_HEADER = 'HTTP_X_CONTAINER_META_FULL_KEY'
READ_KEY_HEADER = 'HTTP_X_CONTAINER_META_READ_KEY'
READ_RESTRICTED_METHODS = ['PUT', 'POST', 'DELETE']
DEFAULT_MAX_KEYS_PER_CONTAINER = 3
def generate_valid_metadata_keynames(key_name, max_keys):
"""
Generates a set of valid key names stored in a container's metadata to
include in results
:param key_name: base key (unprefixed)
:param max_keys: max number of valid keys
:returns: list of names of keys that are valid.
"""
cmp_key = key_name.lower()
valid_keynames = [
"%s-%s" % (cmp_key, i + 1) for i in xrange(1, max_keys)]
return [cmp_key, ] + valid_keynames
def get_container_keys_from_metadata(meta, max_keys):
"""
Extracts the container keys from metadata.
:param meta: container metadata
:param max_keys: max number of valid keys to check on a container
:returns: dict of keys found (possibly empty if no keys set)
"""
keys = defaultdict(list)
full_keys = generate_valid_metadata_keynames(FULL_KEY, max_keys)
read_keys = generate_valid_metadata_keynames(READ_KEY, max_keys)
for key, value in meta.iteritems():
v = swift_utils.get_valid_utf8_str(value)
cmp_key = key.lower()
if cmp_key in full_keys:
keys[FULL_KEY].append(v)
elif cmp_key in read_keys:
keys[READ_KEY].append(v)
return keys
def key_matches(to_match, keys):
"""
Checks whether the to_match key is in the list of keys. This leverages
the swift streq_const_time string comparator to guard against timing
attacks.
:param to_match: a key to check contains
:param keys: a list of keys to compare against
:returns: boolean
"""
return any(
[swift_utils.streq_const_time(to_match, key) for key in keys])
def extract_request_keys(env):
"""
Returns the key attempting to be used for the request by appearance in
the request headers
:param env: The WSGI environment for the request.
:returns: key type, key value
"""
headers = env.keys()
if FULL_KEY_HEADER in headers:
return FULL_KEY, env.get(FULL_KEY_HEADER)
    elif READ_KEY_HEADER in headers:
        return READ_KEY, env.get(READ_KEY_HEADER)
return None, None
class ContainerKeys(object):
"""
WSGI Middleware to grant access to containers based on pre-defined
Read / Full access API keys on a per-container basis. See the overview
for more information.
:param app: The next WSGI filter or app in the paste.deploy chain.
:param conf: The configuration dict for the middleware.
"""
def __init__(self, app, conf, max_keys_per_container=DEFAULT_MAX_KEYS_PER_CONTAINER):
self.app = app
self.conf = conf
self.max_keys_per_container = max_keys_per_container
self.logger = swift_utils.get_logger(conf, log_route='containerkeys')
def __call__(self, env, start_response):
"""
Main hook into the WSGI paste.deploy filter/app pipeline.
:param env: The WSGI environment dict.
:param start_response: The WSGI start_response hook.
:returns: Response as per WSGI.
"""
# Start by passing through based on the least amount of processing
# possible to regular auth.
if env.get('HTTP_X_AUTH_TOKEN', False):
# user is trying standard auth, continue the request per usual.
return self.app(env, start_response)
try_key_type, try_key_value = extract_request_keys(env)
if not try_key_value or not try_key_type:
# if no headers were attempted, pass through to keystone
# empty api key header is a no-op
return self.app(env, start_response)
keys = self._get_container_keys(env, start_response)
if not keys:
# if no keys are set on a container, pass through to keystone
return self.app(env, start_response)
#
        # Begin marking requests as invalid: from here on, a client is actually trying key auth.
#
if try_key_type == READ_KEY:
if not key_matches(try_key_value, keys.get(READ_KEY)):
# invalid key
return self._invalid(env, start_response)
if env['REQUEST_METHOD'] in READ_RESTRICTED_METHODS:
# read keys cannot do non-read actions
return self._invalid(env, start_response)
elif (try_key_type == FULL_KEY
and not key_matches(try_key_value, keys.get(FULL_KEY))):
# invalid full key
return self._invalid(env, start_response)
#
# Thundercats are GO. Tell us not to continue authorization down the
# stream.
#
env['swift.authorize_override'] = True
return self.app(env, start_response)
def _get_container_keys(self, env, account):
"""
Returns the X-Container-Meta-[Full|Read]-Key-[N]? header values for the
container, or an empty dict if none are set.
:param env: The WSGI environment for the request.
:param account: Account str.
:returns: {key_type: key_value}
"""
container_info = get_container_info(env, self.app, swift_source='CK')
return get_container_keys_from_metadata(container_info['meta'],
self.max_keys_per_container)
def _invalid(self, env, start_response):
"""
Performs the necessary steps to indicate a WSGI 401
Unauthorized response to the request.
:param env: The WSGI environment for the request.
:param start_response: The WSGI start_response hook.
:returns: 401 response as per WSGI.
"""
if env['REQUEST_METHOD'] == 'HEAD':
body = None
else:
body = '401 Unauthorized: Auth Key invalid\n'
return HTTPUnauthorized(body=body)(env, start_response)
def filter_factory(global_conf, **local_conf):
"""Returns the WSGI filter for use with paste.deploy."""
conf = global_conf.copy()
conf.update(local_conf)
max_keys_per_container = int(conf.get('max_keys_per_container',
DEFAULT_MAX_KEYS_PER_CONTAINER))
swift_utils.register_swift_info(
'containerkeys',
max_keys_per_container=max_keys_per_container)
def auth_filter(app):
return ContainerKeys(
app, conf,
max_keys_per_container=max_keys_per_container)
return auth_filter
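# Illustrative paste.deploy wiring (hypothetical pipeline and egg name; place the
# filter ahead of the regular auth middleware so keyed requests can short-circuit
# Keystone):
#
#   [pipeline:main]
#   pipeline = catch_errors containerkeys authtoken keystoneauth proxy-server
#
#   [filter:containerkeys]
#   use = egg:containerkeys#containerkeys
#   max_keys_per_container = 3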
| [
"[email protected]"
] | |
7af4bd620c1eacbd1e6a1703e168ef6bebae1672 | 4875108ee320efe3e17346e35359f938af7146b1 | /test.py | 63c6f35d1d049bbe0fb91c7e5c802960d0956c47 | [
"BSD-2-Clause"
] | permissive | liuqingci/PSpider | 504db41f69e61beaf22face04defe39a38a20843 | 45fffd5395027cb930898b9b728ba6f3d36bc034 | refs/heads/master | 2020-06-02T23:18:08.101128 | 2019-06-11T02:54:22 | 2019-06-11T06:13:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,083 | py | # _*_ coding: utf-8 _*_
"""
test.py by xianhu
"""
import re
import sys
import spider
import random
import logging
import datetime
import requests
from bs4 import BeautifulSoup
requests.packages.urllib3.disable_warnings()
class MyFetcher(spider.Fetcher):
"""
fetcher module, rewrite url_fetch()
"""
def url_fetch(self, priority: int, url: str, keys: dict, deep: int, repeat: int, proxies=None):
response = requests.get(url, params=None, headers={}, data=None, proxies=proxies, verify=False, allow_redirects=True, timeout=(3.05, 10))
response.raise_for_status()
# test error-logging
assert random.randint(0, 100) != 8, "error-in-fetcher"
return 1, (response.status_code, response.url, response.text), 1
class MyParser(spider.Parser):
"""
parser module, rewrite htm_parse()
"""
def __init__(self, max_deep=0):
"""
constructor
"""
self._max_deep = max_deep
return
def htm_parse(self, priority: int, url: str, keys: dict, deep: int, content: object):
status_code, url_now, html_text = content
# test multi-processing(heavy time)
[BeautifulSoup(html_text, "lxml") for _ in range(10)]
url_list = []
if (self._max_deep < 0) or (deep < self._max_deep):
re_group = re.findall(r"<a.+?href=\"(?P<url>.{5,}?)\".*?>", html_text, flags=re.IGNORECASE)
url_list = [(spider.get_url_legal(_url, base_url=url), keys, priority+1) for _url in re_group]
# save_list can be list / tuple / dict
title = re.search(r"<title>(?P<title>.+?)</title>", html_text, flags=re.IGNORECASE)
# save_list = [(url, title.group("title").strip(), datetime.datetime.now()), ] if title else []
save_list = [{"url": url, "title": title.group("title").strip(), "datetime": datetime.datetime.now()}, ] if title else {}
# test error-logging
assert random.randint(0, 100) != 8, "error-in-parser"
return 1, url_list, save_list
class MySaver(spider.Saver):
"""
saver module, rewrite item_save()
"""
def __init__(self, save_pipe=sys.stdout):
"""
constructor
"""
self._save_pipe = save_pipe
return
def item_save(self, url: str, keys: dict, item: (list, tuple, dict)):
# item can be list / tuple / dict
# self._save_pipe.write("\t".join([str(col) for col in item]) + "\n")
self._save_pipe.write("\t".join([item["url"], item["title"], str(item["datetime"])]) + "\n")
self._save_pipe.flush()
return 1, None
class MyProxies(spider.Proxieser):
"""
proxies module, only rewrite proxies_get()
"""
def proxies_get(self):
response = requests.get("http://xxxx.com/proxies")
proxies_result = [{"http": "http://%s" % ipport, "https": "https://%s" % ipport} for ipport in response.text.split("\n")]
return 1, proxies_result
def test_spider():
"""
test spider
"""
# initial fetcher / parser / saver / proxieser
fetcher = MyFetcher(sleep_time=0, max_repeat=3)
parser = MyParser(max_deep=2)
saver = MySaver(save_pipe=open("out.txt", "w"))
proxieser = MyProxies(sleep_time=5)
# define url_filter
url_filter = spider.UrlFilter(white_patterns=(re.compile(r"^http[s]?://(www\.)?appinn\.com"), ), capacity=None)
# initial web_spider
# web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=None, url_filter=url_filter, queue_parse_size=-1)
web_spider = spider.WebSpider(fetcher, parser, saver, proxieser=proxieser, url_filter=url_filter, queue_parse_size=100, queue_proxies_size=100)
# add start url
web_spider.set_start_url("https://www.appinn.com/", priority=0, keys={"type": "index"}, deep=0)
# start web_spider
web_spider.start_working(fetcher_num=20)
# wait for finished
web_spider.wait_for_finished()
return
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING, format="%(asctime)s\t%(levelname)s\t%(message)s")
test_spider()
exit()
| [
"[email protected]"
] | |
a3bb06d33689b7e7c1437a306a443d4701cb84c1 | f2889a13368b59d8b82f7def1a31a6277b6518b7 | /2007.py | 21c2d0bc31844393f9fde2d7322b2be12978ac4f | [] | no_license | htl1126/leetcode | dacde03de5c9c967e527c4c3b29a4547154e11b3 | c33559dc5e0bf6879bb3462ab65a9446a66d19f6 | refs/heads/master | 2023-09-01T14:57:57.302544 | 2023-08-25T15:50:56 | 2023-08-25T15:50:56 | 29,514,867 | 7 | 1 | null | null | null | null | UTF-8 | Python | false | false | 558 | py | # Ref: https://leetcode.com/problems/find-original-array-from-doubled-array/discuss/1470959/JavaC%2B%2BPython-Match-from-the-Smallest-or-Biggest-100
import collections
from typing import List
class Solution:
def findOriginalArray(self, changed: List[int]) -> List[int]:
c = collections.Counter(changed)
if c[0] % 2 == 1:
return []
for n in sorted(c): # need to sort the keys
if c[n] > c[n * 2]:
return []
c[n * 2] -= c[n] if n > 0 else c[n] // 2 # keep half number of zeros to return
return list(c.elements())
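# Illustrative check (outside the LeetCode harness):
#   Solution().findOriginalArray([1, 3, 4, 2, 6, 8])  # -> [1, 3, 4]
#   Solution().findOriginalArray([6, 3, 0, 1])        # -> [], odd count of zeros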
| [
"[email protected]"
] | |
4fb8a6613c23a11b15ea041eb6992e6c99f4496a | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/852-tideGauge.py | 23b76be58b869b4adff4020388a4f358c87eeb26 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
"""
run KFOLD method for regression
"""
#defining directories
dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"
#cd to the lagged predictors directory
os.chdir(dir_in)
x = 852
y = 853
#empty dataframe for model validation
df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse'])
#looping through
for tg in range(x,y):
os.chdir(dir_in)
#filter only .csv files
tgNames = []
for file in glob.glob("*.csv"):
tgNames.append(file)
tg_name = sorted(tgNames)[tg]
print(tg_name)
##########################################
#check if this tg is already taken care of
##########################################
os.chdir(dir_out)
if os.path.isfile(tg_name):
print("this tide gauge is already taken care of")
return "file already analyzed!"
os.chdir(dir_in)
#load predictor
pred = pd.read_csv(tg_name)
pred.drop('Unnamed: 0', axis = 1, inplace = True)
#add squared and cubed wind terms (as in WPI model)
pickTerms = lambda x: x.startswith('wnd')
wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
wnd_sqr = pred[wndTerms]**2
wnd_cbd = pred[wndTerms]**3
pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)
#standardize predictor data
dat = pred.iloc[:,1:]
scaler = StandardScaler()
print(scaler.fit(dat))
dat_standardized = pd.DataFrame(scaler.transform(dat), \
columns = dat.columns)
pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)
#load surge data
os.chdir(surge_path)
surge = pd.read_csv(tg_name)
surge.drop('Unnamed: 0', axis = 1, inplace = True)
#remove duplicated surge rows
surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
surge.reset_index(inplace = True)
surge.drop('index', axis = 1, inplace = True)
#adjust surge time format to match that of pred
time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)
#merge predictors and surge to find common time frame
pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
pred_surge.sort_values(by = 'date', inplace = True)
#find rows that have nans and remove them
row_nan = pred_surge[pred_surge.isna().any(axis =1)]
pred_surge.drop(row_nan.index, axis = 0, inplace = True)
pred_surge.reset_index(inplace = True)
pred_surge.drop('index', axis = 1, inplace = True)
#in case pred and surge don't overlap
if pred_surge.shape[0] == 0:
print('-'*80)
            print('Predictors and Surge don\'t overlap')
print('-'*80)
continue
pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
pred_surge['date'])), \
columns = ['date'])
#prepare data for training/testing
X = pred_surge.iloc[:,1:-1]
y = pd.DataFrame(pred_surge['surge'])
y = y.reset_index()
y.drop(['index'], axis = 1, inplace = True)
#apply PCA
pca = PCA(.95)
pca.fit(X)
X_pca = pca.transform(X)
#apply 10 fold cross validation
kf = KFold(n_splits=10, random_state=29)
metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
for train_index, test_index in kf.split(X):
X_train, X_test = X_pca[train_index], X_pca[test_index]
y_train, y_test = y['surge'][train_index], y['surge'][test_index]
#train regression model
rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
min_samples_leaf = 1)
rf.fit(X_train, y_train)
#predictions
predictions = rf.predict(X_test)
# pred_obs = pd.concat([pd.DataFrame(np.array(predictions)), \
# pd.DataFrame(np.array(y_test))], \
# axis = 1)
# pred_obs.columns = ['pred', 'obs']
# combo = pd.concat([combo, pred_obs], axis = 0)
#evaluation matrix - check p value
if stats.pearsonr(y_test, predictions)[1] >= 0.05:
print("insignificant correlation!")
continue
else:
print(stats.pearsonr(y_test, predictions))
metric_corr.append(stats.pearsonr(y_test, predictions)[0])
print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
print()
metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
#number of years used to train/test model
num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
pred_surge['date'][0]).days/365
longitude = surge['lon'][0]
latitude = surge['lat'][0]
num_pc = X_pca.shape[1] #number of principal components
corr = np.mean(metric_corr)
rmse = np.mean(metric_rmse)
print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
np.mean(metric_rmse), '\n')
#original size and pca size of matrix added
new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
'num_95pcs','corrn', 'rmse']
df = pd.concat([df, new_df], axis = 0)
#save df as cs - in case of interruption
os.chdir(dir_out)
df.to_csv(tg_name)
#run script
validateRF()
| [
"[email protected]"
] | |
7c254f2c139a62a672f3fceeef3f56306b55cf97 | 1cbcf8660d3ea833b0a9aa3d36fe07839bc5cfc5 | /apps/commons/__init__.py | 0f0e0b7adb01e7e6695254fbea30187fca7ed79c | [] | no_license | zhanghe06/migration_project | f77776969907740494281ac6d7485f35d4765115 | 0264b292873b211bfeca0d645cc41abc9efe883f | refs/heads/master | 2022-12-12T10:55:43.475939 | 2019-09-29T09:19:13 | 2019-09-29T09:19:13 | 185,584,884 | 0 | 1 | null | 2022-12-08T05:04:58 | 2019-05-08T10:31:57 | Python | UTF-8 | Python | false | false | 251 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@author: zhanghe
@software: PyCharm
@file: __init__.py.py
@time: 2019-04-26 20:06
"""
def func():
pass
class Main(object):
def __init__(self):
pass
if __name__ == '__main__':
pass
| [
"[email protected]"
] | |
4501f26cfb2bffa45db443d6c895bedead0dabe5 | 5ec7d0bad8a77c79843a2813f5effcb3a2b7e288 | /lean/components/config/cli_config_manager.py | 39f62229b4180cd6dba56d0480bf199a2f862c96 | [
"Apache-2.0"
] | permissive | xdpknx/lean-cli | aca9b9c9c4e156c9faefcfa8ccdfc20423b510a0 | c1051bd3e8851ae96f6e84f608a7116b1689c9e9 | refs/heads/master | 2023-08-08T02:30:09.827647 | 2021-09-21T21:36:24 | 2021-09-21T21:36:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,050 | py | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean CLI v1.0. Copyright 2021 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from lean.components.config.storage import Storage
from lean.constants import DEFAULT_ENGINE_IMAGE, DEFAULT_RESEARCH_IMAGE
from lean.models.docker import DockerImage
from lean.models.errors import MoreInfoError
from lean.models.options import ChoiceOption, Option
class CLIConfigManager:
"""The CLIConfigManager class contains all configurable CLI options."""
def __init__(self, general_storage: Storage, credentials_storage: Storage) -> None:
"""Creates a new CLIConfigManager instance.
:param general_storage: the Storage instance for general, non-sensitive options
:param credentials_storage: the Storage instance for credentials
"""
self.user_id = Option("user-id",
"The user id used when making authenticated requests to the QuantConnect API.",
True,
credentials_storage)
self.api_token = Option("api-token",
"The API token used when making authenticated requests to the QuantConnect API.",
True,
credentials_storage)
self.default_language = ChoiceOption("default-language",
"The default language used when creating new projects.",
["python", "csharp"],
False,
general_storage)
self.engine_image = Option("engine-image",
f"The Docker image used when running the LEAN engine ({DEFAULT_ENGINE_IMAGE} if not set).",
False,
general_storage)
self.research_image = Option("research-image",
f"The Docker image used when running the research environment ({DEFAULT_RESEARCH_IMAGE} if not set).",
False,
general_storage)
self.all_options = [
self.user_id,
self.api_token,
self.default_language,
self.engine_image,
self.research_image
]
def get_option_by_key(self, key: str) -> Option:
"""Returns the option matching the given key.
If no option with the given key exists, an error is raised.
:param key: the key to look for
:return: the option having a key equal to the given key
"""
option = next((x for x in self.all_options if x.key == key), None)
if option is None:
raise MoreInfoError(f"There doesn't exist an option with key '{key}'",
"https://www.lean.io/docs/lean-cli/api-reference/lean-config-set#02-Description")
return option
def get_engine_image(self, override: Optional[str] = None) -> DockerImage:
"""Returns the LEAN engine image to use.
:param override: the image name to use, overriding any defaults or previously configured options
:return: the image that should be used when running the LEAN engine
"""
return self._get_image_name(self.engine_image, DEFAULT_ENGINE_IMAGE, override)
def get_research_image(self, override: Optional[str] = None) -> DockerImage:
"""Returns the LEAN research image to use.
:param override: the image name to use, overriding any defaults or previously configured options
:return: the image that should be used when running the research environment
"""
return self._get_image_name(self.research_image, DEFAULT_RESEARCH_IMAGE, override)
def _get_image_name(self, option: Option, default: str, override: Optional[str]) -> DockerImage:
"""Returns the image to use.
:param option: the CLI option that configures the image type
:param override: the image name to use, overriding any defaults or previously configured options
:param default: the default image to use when the option is not set and no override is given
:return: the image to use
"""
if override is not None:
image = override
else:
image = option.get_value(default)
return DockerImage.parse(image)
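# Illustrative usage sketch (hypothetical Storage instances):
#
#   manager = CLIConfigManager(general_storage, credentials_storage)
#   manager.get_option_by_key("default-language")   # -> the ChoiceOption above
#   engine = manager.get_engine_image()             # falls back to DEFAULT_ENGINE_IMAGE
#   custom = manager.get_engine_image("quantconnect/lean:latest")  # override wins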
| [
"[email protected]"
] | |
caab84331a286957688308ab6662f76886565cad | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/KZZMM/YW_KZZMM_SHXJ_082.py | 8be9a19d9eac064c5c1eff466069cccce9c4febe | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,053 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_KZZMM_SHXJ_082(xtp_test_case):
# YW_KZZMM_SHXJ_082
def test_YW_KZZMM_SHXJ_082(self):
title = '默认3:订单报价未超过涨跌幅限制-沪A限价卖=跌停价'
        # Define the expected values for this test case
        # Expected status: initial, unfilled, partially filled, fully filled, partial-cancel reported,
        # partially cancelled, cancel pending, cancelled, rejected, cancel-rejected, internal cancel
        # xtp_ID and cancel_xtpID default to 0 and do not need to change
case_goal = {
'期望状态': ['未成交','全成','部成'][trade_type],
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
        # Define the order parameters ------------------------------------------
        # Arguments: ticker, market, security type, security status, trading status, side (B=buy, S=sell), expected status, Api
stkparm = QueryStkPriceQty('110032', '1', '8', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SH_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_LIMIT'],
'price': 100,
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 0
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
a9cfb2037e52211b220afeeb004deb4751481476 | 7fb2fa25c86a824343b6ca0974978db6b12e5590 | /analysis/count_emojis.py | 4921e65519970758f36b23edfb37ee5697942392 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pmichel31415/mtnt | ff3d6f509e665c525946dd0635904cb0a3f9766b | 2a7e9a36b36bd1e95d6d8ab9f28f1d9359240807 | refs/heads/master | 2021-04-06T02:25:17.441054 | 2019-04-02T18:13:05 | 2019-04-02T18:13:05 | 125,262,764 | 61 | 7 | MIT | 2019-02-26T16:01:56 | 2018-03-14T19:22:44 | Python | UTF-8 | Python | false | false | 477 | py | #!/usr/bin/python3
"""
Count the number of emojis in the input
"""
import sys
import emoji
import re
txt_emoji_regex = re.compile(r'(8|:|;|=)(\^|\'|-)?(\)|\(|D|P|p)')
utf8_emoji_regex = emoji.get_emoji_regexp()
N = 0
try:
for line in sys.stdin:
for w in line.strip().split():
if txt_emoji_regex.search(w) or utf8_emoji_regex.search(w):
N += 1
except (KeyboardInterrupt, EOFError):
pass
finally:
print(N)
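# Example (hypothetical input): two of the four tokens look like emojis
#   $ echo "nice work :) 😀" | ./count_emojis.py
#   2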
| [
"[email protected]"
] | |
81bbfc87bb3dd84e11481ba72a90d7e8911c8a21 | caed98915a93639e0a56b8296c16e96c7d9a15ab | /Math/Power of Three.py | be04323e1373f2ab7d85558dda6dce5cefca560e | [] | no_license | PiyushChandra17/365-Days-Of-LeetCode | 0647787ec7e8f1baf10b6bfc687bba06f635838c | 7e9e9d146423ca2c5b1c6a3831f21dd85fa376d5 | refs/heads/main | 2023-02-13T10:41:36.110303 | 2021-01-17T11:58:51 | 2021-01-17T11:58:51 | 319,974,573 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 101 | py | class Solution:
def isPowerOfThree(self, n: int) -> bool:
        # 3**19 = 1162261467 is the largest power of three below 2**31, so a
        # positive n is a power of three exactly when it divides 3**19.
        return n > 0 and 3**19 % n == 0 | [
"[email protected]"
] | |
b5b632dd44c11204aab8b8a20e484fef52e27155 | cf652cb90f9d6b22b5943e7d025af631214a904d | /gui/qt/masternode_widgets.py | 2d82f068da1566cab334c27284b78794da106afd | [
"MIT"
] | permissive | ddude1/TestLite | 02919c68013d2ede9195d618d94260b842a5e292 | 3f3c00e4ef03dd9b23b99b02f9a8895da8d65aef | refs/heads/master | 2022-12-11T12:22:25.029101 | 2018-06-13T14:11:51 | 2018-06-13T14:11:51 | 136,489,568 | 0 | 0 | MIT | 2022-09-23T21:47:03 | 2018-06-07T14:31:31 | Python | UTF-8 | Python | false | false | 16,092 | py | """Masternode-related widgets."""
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from electrum_xgox import bitcoin
from electrum_xgox.bitcoin import COIN
from electrum_xgox.i18n import _
from electrum_xgox.masternode import NetworkAddress, MasternodeAnnounce
from . import util
def masternode_status(status):
"""Get a human-friendly representation of status.
Returns a 3-tuple of (enabled, one_word_description, description).
"""
statuses = {
'PRE_ENABLED': (True, _('Enabling'), _('Waiting for masternode to enable itself.')),
'ENABLED': (True, _('Enabled'), _('Masternode is enabled.')),
'EXPIRED': (False, _('Disabled'), _('Masternode failed to ping the network and was disabled.')),
'VIN_SPENT': (False, _('Disabled'), _('Collateral payment has been spent.')),
'REMOVE': (False, _('Disabled'), _('Masternode failed to ping the network and was disabled.')),
}
if statuses.get(status):
return statuses[status]
elif status is False:
return (False, _('N/A'), _('Masternode has not been seen on the network.'))
return (False, _('Unknown'), _('Unknown masternode status.'))
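# Illustrative usage sketch:
#   enabled, word, description = masternode_status('ENABLED')
#   # -> (True, 'Enabled', 'Masternode is enabled.')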
class NetworkAddressWidget(QWidget):
"""Widget that represents a network address."""
def __init__(self, parent=None):
super(NetworkAddressWidget, self).__init__(parent)
self.ip_edit = QLineEdit()
self.port_edit = QSpinBox()
        self.port_edit.setRange(0, 65535)  # valid TCP port range
hbox = QHBoxLayout()
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(QLabel(_('IP:')))
hbox.addWidget(self.ip_edit, stretch=1)
hbox.addWidget(QLabel(_('Port:')))
hbox.addWidget(self.port_edit, stretch=1)
self.setLayout(hbox)
@pyqtProperty(str)
def string(self):
return '%s:%d' % (str(self.ip_edit.text()), self.port_edit.value())
@string.setter
def string(self, value):
s = str(value).split(':')
ip = ''
port = 0
if len(s) > 0:
ip = s[0]
if len(s) > 1:
port = int(s[1])
self.ip_edit.setText(ip)
self.port_edit.setValue(port)
def get_addr(self):
"""Get a NetworkAddress instance from this widget's data."""
ip = str(self.ip_edit.text())
port = self.port_edit.value()
if self.validate_ip(ip):
return NetworkAddress(ip=ip, port=port)
return NetworkAddress()
# TODO IPv6 support.
def validate_ip(self, s):
try:
ip = s.split('.')
if len(ip) != 4:
raise Exception('Invalid length')
for i in ip:
if int(i) < 0 or int(i) > 255:
raise ValueError('Invalid IP byte')
except Exception:
return False
return True
class PrevOutWidget(QWidget):
"""Widget that represents a previous outpoint."""
def __init__(self, parent=None):
super(PrevOutWidget, self).__init__(parent)
self.vin = {}
self.hash_edit = QLineEdit()
self.hash_edit.setPlaceholderText(_('The TxID of your 1000 XGOX output'))
self.index_edit = QLineEdit()
self.index_edit.setPlaceholderText(_('The output number of your 1000 XGOX output'))
self.address_edit = QLineEdit()
self.address_edit.setPlaceholderText(_('The address that 1000 XGOX was sent to'))
# Collection of fields so that it's easier to act on them all at once.
self.fields = (self.hash_edit, self.index_edit, self.address_edit)
for i in self.fields:
i.setFont(QFont(util.MONOSPACE_FONT))
form = QFormLayout()
form.setContentsMargins(0, 0, 0, 0)
form.addRow(_('TxID:'), self.hash_edit)
form.addRow(_('Output Index:'), self.index_edit)
form.addRow(_('Address:'), self.address_edit)
self.setLayout(form)
@pyqtProperty(str)
def string(self):
return self.get_str()
@string.setter
def string(self, value):
return self.set_str(str(value))
def get_str(self):
values = [str(self.hash_edit.text()), str(self.index_edit.text()), str(self.address_edit.text())]
values.append(str(self.vin.get('value', '')))
values.append(self.vin.get('scriptSig', ''))
return ':'.join(values)
def set_str(self, value):
s = str(value).split(':')
values = []
try:
values.append(('prevout_hash', s[0]))
values.append(('prevout_n', int(s[1])))
values.append(('address', s[2]))
values.append(('value', int(s[3])))
values.append(('scriptSig', s[4]))
# Don't fail if not all values are present.
except (IndexError, ValueError):
pass
vin = {k: v for k, v in values}
self.set_dict(vin)
def get_dict(self):
d = {}
txid = str(self.hash_edit.text())
if not txid:
return d
index = str(self.index_edit.text())
if not index:
index = '0'
address = str(self.address_edit.text())
d['prevout_hash'] = txid
d['prevout_n'] = int(index)
d['address'] = address
if self.vin:
d['value'] = int(self.vin.get('value', '0'))
d['scriptSig'] = self.vin.get('scriptSig', '')
return d
def set_dict(self, d):
self.hash_edit.setText(d.get('prevout_hash', ''))
self.index_edit.setText(str(d.get('prevout_n', '')))
self.address_edit.setText(d.get('address', ''))
self.vin = dict(d)
def clear(self):
for widget in self.fields:
widget.clear()
self.vin = {}
def setReadOnly(self, isreadonly):
for widget in self.fields:
widget.setReadOnly(isreadonly)
class MasternodeEditor(QWidget):
"""Editor for masternodes."""
def __init__(self, parent=None):
super(MasternodeEditor, self).__init__(parent)
self.alias_edit = QLineEdit()
self.alias_edit.setPlaceholderText(_('Enter a name for this masternode'))
self.vin_edit = PrevOutWidget()
self.addr_edit = NetworkAddressWidget()
self.delegate_key_edit = QLineEdit()
self.delegate_key_edit.setFont(QFont(util.MONOSPACE_FONT))
self.delegate_key_edit.setPlaceholderText(_('Your masternode\'s private key'))
self.protocol_version_edit = QLineEdit()
self.protocol_version_edit.setText('70201')
self.status_edit = QLineEdit()
self.status_edit.setPlaceholderText(_('Masternode status'))
self.status_edit.setReadOnly(True)
form = QFormLayout()
form.addRow(_('Alias:'), self.alias_edit)
form.addRow(_('Status:'), self.status_edit)
form.addRow(_('Collateral XGOX Output:'), self.vin_edit)
form.addRow(_('Masternode Private Key:'), self.delegate_key_edit)
form.addRow(_('Address:'), self.addr_edit)
form.addRow(_('Protocol Version:'), self.protocol_version_edit)
self.setLayout(form)
def get_masternode_args(self):
"""Get MasternodeAnnounce keyword args from this widget's data."""
kwargs = {}
kwargs['alias'] = str(self.alias_edit.text())
kwargs['vin'] = self.vin_edit.get_dict()
kwargs['addr'] = self.addr_edit.get_addr()
protocol_version = str(self.protocol_version_edit.text())
if protocol_version:
kwargs['protocol_version'] = int(protocol_version)
return kwargs
class MasternodeOutputsWidget(QListWidget):
"""Widget that displays available masternode outputs."""
outputSelected = pyqtSignal(dict, name='outputSelected')
def __init__(self, parent=None):
super(MasternodeOutputsWidget, self).__init__(parent)
self.outputs = {}
self.setSelectionMode(QAbstractItemView.SingleSelection)
self.selectionModel().selectionChanged.connect(self.on_selection_changed)
def sizeHint(self):
return QSize(256, 60)
def add_output(self, d):
"""Add a valid output."""
label = '%s:%s' % (d['prevout_hash'], d['prevout_n'])
self.outputs[label] = d
item = QListWidgetItem(label)
item.setFont(QFont(util.MONOSPACE_FONT))
self.addItem(item)
def add_outputs(self, outputs):
list(map(self.add_output, outputs))
self.setCurrentRow(0)
def clear(self):
super(MasternodeOutputsWidget, self).clear()
self.outputs.clear()
def on_selection_changed(self, selected, deselected):
"""Emit the selected output."""
items = self.selectedItems()
if not items:
return
self.outputSelected.emit(self.outputs[str(items[0].text())])
class MasternodeOutputsTab(QWidget):
"""Widget that is used to select a masternode output."""
def __init__(self, parent):
super(MasternodeOutputsTab, self).__init__(parent)
self.dialog = parent
self.manager = parent.manager
include_frozen_checkbox = QCheckBox(_('Include frozen addresses'))
include_frozen_checkbox.setChecked(False)
self.scan_outputs_button = QPushButton(_('Scan For Masternode Outputs'))
def on_scan_outputs():
"""Call scan_for_outputs() with whether to include frozen addresses."""
self.scan_for_outputs(include_frozen_checkbox.isChecked())
self.scan_outputs_button.clicked.connect(on_scan_outputs)
self.status_edit = QLineEdit()
self.status_edit.setReadOnly(True)
self.valid_outputs_list = MasternodeOutputsWidget()
self.valid_outputs_list.outputSelected.connect(self.set_output)
self.collateral_edit = PrevOutWidget()
self.collateral_edit.setReadOnly(True)
self.mapper = QDataWidgetMapper()
self.mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
self.mapper.setModel(self.dialog.masternodes_widget.proxy_model)
model = self.dialog.masternodes_widget.model
self.mapper.addMapping(self.collateral_edit, model.VIN, b'string')
self.save_output_button = QPushButton(_('Save'))
self.save_output_button.setEnabled(False)
self.save_output_button.clicked.connect(self.save_output)
vbox = QVBoxLayout()
desc = ' '.join(['Use this tab to scan for and choose a collateral payment for your masternode.',
'A valid collateral payment is exactly 1000 XGOX.'])
desc = QLabel(_(desc))
desc.setWordWrap(True)
vbox.addWidget(desc)
status_box = QHBoxLayout()
status_box.setContentsMargins(0, 0, 0, 0)
status_box.addWidget(QLabel(_('Status:')))
status_box.addWidget(self.status_edit, stretch=1)
vbox.addLayout(status_box)
valid_outputs_box = QVBoxLayout()
valid_outputs_box.setContentsMargins(0, 0, 0, 0)
valid_outputs_box.addWidget(QLabel(_('Masternode Outputs:')))
valid_outputs_box.addWidget(self.valid_outputs_list)
vbox.addLayout(util.Buttons(include_frozen_checkbox, self.scan_outputs_button))
vbox.addLayout(valid_outputs_box)
vbox.addWidget(self.collateral_edit)
vbox.addLayout(util.Buttons(self.save_output_button))
self.setLayout(vbox)
def scan_for_outputs(self, include_frozen):
"""Scan for 1000 XGOX outputs.
If one or more is found, populate the list and enable the sign button.
"""
self.valid_outputs_list.clear()
exclude_frozen = not include_frozen
coins = list(self.manager.get_masternode_outputs(exclude_frozen=exclude_frozen))
if len(coins) > 0:
self.valid_outputs_list.add_outputs(coins)
else:
self.status_edit.setText(_('No 1000 XGOX outputs were found.'))
self.status_edit.setStyleSheet(util.ColorScheme.RED.as_stylesheet())
def set_output(self, vin):
"""Set the selected output."""
self.collateral_edit.set_dict(vin)
self.save_output_button.setEnabled(True)
def save_output(self):
"""Save the selected output as the current masternode's collateral."""
self.mapper.submit()
# Determine the masternode's collateral key using this output.
self.dialog.populate_collateral_key()
def set_mapper_index(self, row):
"""Set the row that the data widget mapper should use."""
self.valid_outputs_list.clear()
self.status_edit.clear()
self.status_edit.setStyleSheet(util.ColorScheme.DEFAULT.as_stylesheet())
self.mapper.setCurrentIndex(row)
mn = self.dialog.masternodes_widget.masternode_for_row(row)
status_text = _('Masternode has no collateral payment assigned.')
can_scan = not mn.announced
# Disable the scan_outputs button if the masternode already has an assigned output.
if mn.vin.get('value', 0) == COIN * 1000:
can_scan = False
self.valid_outputs_list.add_output(mn.vin)
status_text = _('Masternode already has a collateral payment.')
        self.status_edit.setText(status_text)  # status_text is already localized above; avoid double _()
self.scan_outputs_button.setEnabled(can_scan)
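# (Added note) COIN is the base-unit multiplier imported elsewhere in this
# module; assuming the usual 1e8 satoshi-style scaling, the COIN * 1000
# comparison above matches a collateral of exactly 1000 XGOX.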
class SignAnnounceWidget(QWidget):
"""Widget that displays information about signing a Masternode Announce."""
def __init__(self, parent):
super(SignAnnounceWidget, self).__init__(parent)
self.dialog = parent
self.manager = parent.manager
# Displays the status of the masternode.
self.status_edit = QLineEdit()
self.status_edit.setReadOnly(True)
self.alias_edit = QLineEdit()
self.collateral_edit = PrevOutWidget()
self.delegate_edit = QLineEdit()
self.delegate_edit.setFont(QFont(util.MONOSPACE_FONT))
for i in [self.alias_edit, self.collateral_edit, self.delegate_edit]:
i.setReadOnly(True)
self.mapper = QDataWidgetMapper()
self.mapper.setSubmitPolicy(QDataWidgetMapper.ManualSubmit)
self.mapper.setModel(self.dialog.masternodes_widget.proxy_model)
model = self.dialog.masternodes_widget.model
self.mapper.addMapping(self.alias_edit, model.ALIAS)
self.mapper.addMapping(self.collateral_edit, model.VIN, b'string')
self.mapper.addMapping(self.delegate_edit, model.DELEGATE)
self.sign_button = QPushButton(_('Activate Masternode'))
self.sign_button.setEnabled(False)
self.sign_button.clicked.connect(self.sign_announce)
status_box = QHBoxLayout()
status_box.setContentsMargins(0, 0, 0, 0)
status_box.addWidget(QLabel(_('Status:')))
status_box.addWidget(self.status_edit, stretch=1)
vbox = QVBoxLayout()
vbox.addLayout(status_box)
form = QFormLayout()
form.addRow(_('Alias:'), self.alias_edit)
form.addRow(_('Collateral XGOX Output:'), self.collateral_edit)
form.addRow(_('Masternode Private Key:'), self.delegate_edit)
vbox.addLayout(form)
vbox.addLayout(util.Buttons(self.sign_button))
self.setLayout(vbox)
def set_mapper_index(self, row):
"""Set the row that the data widget mapper should use."""
self.status_edit.clear()
self.status_edit.setStyleSheet(util.ColorScheme.DEFAULT.as_stylesheet())
self.mapper.setCurrentIndex(row)
mn = self.dialog.masternodes_widget.masternode_for_row(row)
# Disable the sign button if the masternode can't be signed (for whatever reason).
status_text = '%s can be activated' % mn.alias
can_sign = True
try:
self.manager.check_can_sign_masternode(mn.alias)
except Exception as e:
status_text = str(e)
can_sign = False
        self.status_edit.setText(status_text)  # status_text is dynamic here, so _() cannot match it in the catalog
self.sign_button.setEnabled(can_sign)
def sign_announce(self):
"""Set the masternode's vin and sign an announcement."""
self.mapper.submit()
self.dialog.sign_announce(str(self.alias_edit.text()))
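# (Added sketch) End-to-end activation flow wired above: the user picks a
# 1000 XGOX output in MasternodeOutputsTab, save_output() commits it via
# the data-widget mapper and derives the collateral key, and
# SignAnnounceWidget.sign_announce() submits the mapper and hands the
# actual signing to the parent dialog's sign_announce(alias).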
| [
"[email protected]"
] | |
56aea531c535900df425e4611b4776b282f6fa44 | 46ae8264edb9098c9875d2a0a508bc071201ec8b | /res/scripts/client/gui/sharedclancache.py | 1aa1b48a5532bacd3c62029590bc3c3bcdcd0503 | [] | no_license | Difrex/wotsdk | 1fc6156e07e3a5302e6f78eafdea9bec4c897cfb | 510a34c67b8f4c02168a9830d23f5b00068d155b | refs/heads/master | 2021-01-01T19:12:03.592888 | 2016-10-08T12:06:04 | 2016-10-08T12:06:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,099 | py | # Embedded file name: scripts/client/gui/shared/ClanCache.py
from collections import namedtuple
import BigWorld
from Event import Event
from account_helpers import getAccountDatabaseID
from adisp import async, process
from constants import CLAN_MEMBER_FLAGS
from debug_utils import LOG_ERROR
from helpers import html
from gui.clans.formatters import getClanRoleString
from gui.shared.fortifications.fort_provider import ClientFortProvider
from gui.shared.utils import code2str
from messenger.ext import passCensor
from messenger.proto.events import g_messengerEvents
from messenger.storage import storage_getter
class ClanInfo(namedtuple('ClanInfo', ['clanName',
'clanAbbrev',
'chatChannelDBID',
'memberFlags',
'enteringTime'])):
def getClanName(self):
return self.clanName
def getClanAbbrev(self):
return self.clanAbbrev
def getMembersFlags(self):
return self.memberFlags
def getJoiningTime(self):
return self.enteringTime
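# Illustrative only (added, hypothetical values): ClanInfo mirrors the raw
# clanInfo tuple, e.g. ClanInfo('Lone Wolves', 'LW', -1, 0, 0); field
# order matters, as the fallback tuple in _ClanCache.clanInfo shows.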
class _ClanCache(object):
def __init__(self):
self.__waitForSync = False
self.__fortProvider = None
self.__clanMembersLen = None
self.__clanMotto = ''
self.__clanDescription = ''
self.onSyncStarted = Event()
self.onSyncCompleted = Event()
return
def init(self):
self.__fortProvider = ClientFortProvider()
def fini(self):
self.onSyncStarted.clear()
self.onSyncCompleted.clear()
self.clear()
def onAccountShowGUI(self):
self.__startFortProvider()
def onAvatarBecomePlayer(self):
self.__stopFortProvider()
def onDisconnected(self):
self.__stopFortProvider()
@property
def waitForSync(self):
return self.__waitForSync
@async
def update(self, diff = None, callback = None):
self.__invalidateData(diff, callback)
def clear(self):
self.__fortProvider = None
return
@storage_getter('users')
def usersStorage(self):
return None
@property
def fortProvider(self):
return self.__fortProvider
@property
def clanDBID(self):
from gui.shared import g_itemsCache
return g_itemsCache.items.stats.clanDBID
@property
def isInClan(self):
"""
@return: is current player in clan
"""
return self.clanDBID is not None and self.clanDBID != 0
@property
def clanMembers(self):
members = set()
if self.isInClan:
members = set(self.usersStorage.getClanMembersIterator(False))
return members
@property
def clanInfo(self):
from gui.shared import g_itemsCache
info = g_itemsCache.items.stats.clanInfo
if info and len(info) > 1:
return info
else:
return (None, None, -1, 0, 0)
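        # (Added note) The fallback above mirrors ClanInfo's field order:
        # (clanName, clanAbbrev, chatChannelDBID, memberFlags, enteringTime).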
@property
def clanName(self):
return passCensor(html.escape(self.clanInfo[0]))
@property
def clanAbbrev(self):
return self.clanInfo[1]
@property
def clanMotto(self):
return self.__clanMotto
@property
def clanDescription(self):
return self.__clanDescription
@property
def clanTag(self):
result = self.clanAbbrev
if result:
return '[%s]' % result
return result
@property
def clanCommanderName(self):
for member in self.clanMembers:
if member.getClanRole() == CLAN_MEMBER_FLAGS.LEADER:
return member.getName()
return None
@property
def clanRole(self):
user = self.usersStorage.getUser(getAccountDatabaseID())
if user:
role = user.getClanRole()
else:
role = 0
return role
@property
def isClanLeader(self):
return self.clanRole == CLAN_MEMBER_FLAGS.LEADER
@async
@process
def getClanEmblemID(self, callback):
clanEmblem = None
if self.isInClan:
tID = 'clanInfo' + BigWorld.player().name
clanEmblem = yield self.getClanEmblemTextureID(self.clanDBID, False, tID)
callback(clanEmblem)
return
@async
def getFileFromServer(self, clanId, fileType, callback):
if not BigWorld.player().serverSettings['file_server'].has_key(fileType):
LOG_ERROR("Invalid server's file type: %s" % fileType)
self._valueResponse(0, (None, None), callback)
return None
else:
clan_emblems = BigWorld.player().serverSettings['file_server'][fileType]
BigWorld.player().customFilesCache.get(clan_emblems['url_template'] % clanId, lambda url, file: self._valueResponse(0, (url, file), callback), True)
return None
@async
@process
def getClanEmblemTextureID(self, clanDBID, isBig, textureID, callback):
import imghdr
if clanDBID is not None and clanDBID != 0:
_, clanEmblemFile = yield self.getFileFromServer(clanDBID, 'clan_emblems_small' if not isBig else 'clan_emblems_big')
if clanEmblemFile and imghdr.what(None, clanEmblemFile) is not None:
BigWorld.wg_addTempScaleformTexture(textureID, clanEmblemFile)
callback(textureID)
return
callback(None)
return
def getClanRoleUserString(self):
position = self.clanInfo[3]
return getClanRoleString(position)
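        # (Added note) Index 3 of clanInfo is memberFlags (see ClanInfo),
        # i.e. the player's clan-role bits passed to getClanRoleString.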
def onClanInfoReceived(self, clanDBID, clanName, clanAbbrev, clanMotto, clanDescription):
self.__clanMotto = passCensor(html.escape(clanMotto))
self.__clanDescription = passCensor(html.escape(clanDescription))
def _valueResponse(self, resID, value, callback):
if resID < 0:
LOG_ERROR('[class %s] There is error while getting data from cache: %s[%d]' % (self.__class__.__name__, code2str(resID), resID))
return callback(value)
callback(value)
def _onResync(self):
if not self.__waitForSync:
self.__invalidateData()
def __invalidateData(self, diff = None, callback = lambda *args: None):
if diff is not None:
if 'stats' in diff and 'clanInfo' in diff['stats']:
self.__fortProvider.resetState()
callback(True)
return
def __startFortProvider(self):
self.__clanMembersLen = len(self.clanMembers)
g_messengerEvents.users.onClanMembersListChanged += self.__me_onClanMembersListChanged
self.__fortProvider.start(self)
def __stopFortProvider(self):
self.__clanMembersLen = None
g_messengerEvents.users.onClanMembersListChanged -= self.__me_onClanMembersListChanged
self.__fortProvider.stop()
return
def __me_onClanMembersListChanged(self):
clanMembersLen = len(self.clanMembers)
if self.__clanMembersLen is not None and clanMembersLen != self.__clanMembersLen:
self.__clanMembersLen = clanMembersLen
self.__fortProvider.resetState()
self.__fortProvider.notify('onClanMembersListChanged')
return
g_clanCache = _ClanCache() | [
"[email protected]"
] | |
0ce2e5cbbf0829c6f3a636aa81c11c8deda2ed4b | 2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02 | /PyTorch/dev/cv/detection/YOLOX_ID2833_for_PyTorch/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py | 176131d6ee71e712688423706383ed6a3a13e405 | [
"GPL-1.0-or-later",
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | Ascend/ModelZoo-PyTorch | 4c89414b9e2582cef9926d4670108a090c839d2d | 92acc188d3a0f634de58463b6676e70df83ef808 | refs/heads/master | 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 | Apache-2.0 | 2022-10-15T09:29:12 | 2022-04-20T04:11:18 | Python | UTF-8 | Python | false | false | 1,498 | py |
# Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (c) Open-MMLab. All rights reserved.
_base_ = [
'../_base_/models/cascade_rcnn_r50_fpn.py',
'../_base_/datasets/coco_detection.py',
'../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
backbone=dict(
type='DetectoRS_ResNet',
conv_cfg=dict(type='ConvAWS'),
output_img=True),
neck=dict(
type='RFP',
rfp_steps=2,
aspp_out_channels=64,
aspp_dilations=(1, 3, 6, 1),
rfp_backbone=dict(
rfp_inplanes=256,
type='DetectoRS_ResNet',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
conv_cfg=dict(type='ConvAWS'),
pretrained='torchvision://resnet50',
style='pytorch')))
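# Illustrative usage (added; assumes an mmdetection/mmcv environment, since
# this file is only a config fragment):
#   from mmcv import Config
#   cfg = Config.fromfile('configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py')
#   print(cfg.model.neck.type)  # 'RFP'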
| [
"[email protected]"
] | |
b58d8677ccb0a7cdfe14ea57afee51438b6116fa | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /nb836onw9bek4FPDt_16.py | 69b55a2efffa5340071d2d3f67af082fc7f2d0f0 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 239 | py |
def count_same_ends(txt):
c = 0
txt = txt.lower()
txt = txt.replace("!", "")
txt = txt.replace(".", "")
words = txt.split()
for word in words:
if word[0] == word[len(word) - 1] and len(word) != 1:
c += 1
return c
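# Worked example (added for illustration, not in the original file):
# punctuation "!" and "." is stripped, text is lower-cased, and words
# whose first and last characters match (length > 1) are counted.
#   count_same_ends("Sees the sun!")  ->  1   ("sees" starts and ends with "s")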
| [
"[email protected]"
] | |
10f48cf4c5089b09e6141c69acf9be9eddadb417 | 49c64efe5249a287c33893131d0e903c43d48556 | /saloon_api/manage.py | a72546a7b615d3959d6df89eb63dc31d3df45f2d | [] | no_license | mumarkhan999/django_rest_practice_projects | 4d671c96eb7708f07645d9ec698ab259b640bab6 | 5e5bf76362873f6d8d3ac41f050fad75bbf8ca8c | refs/heads/master | 2020-04-17T08:20:52.949068 | 2019-01-18T13:28:50 | 2019-01-18T13:47:28 | 166,408,389 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'saloon_api.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
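# Typical invocations of this entry point (illustrative, not part of the
# generated file):
#   python manage.py migrate
#   python manage.py runserver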
| [
"[email protected]"
] | |
7388a331af4567a46dc4b438a6216cfde308fd11 | edd8ad3dcb6ee9b019c999b712f8ee0c468e2b81 | /Python 300/11. Class/284.py | cbe6087088e53713cea580554af042f416b7832d | [] | no_license | narinn-star/Python | 575cba200de35b9edf3832c4e41ccce657075751 | 14eba211cd3a9e9708a30073ba5b31d21d39eeef | refs/heads/master | 2023-05-25T22:57:26.079294 | 2021-06-07T15:29:39 | 2021-06-07T15:29:39 | 331,647,462 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 386 | py | #class _ 클래스 상속
class car:
def __init__(self, wheel, price):
self.wheel = wheel
self.price = price
class bike(car):
def __init__(self,wheel, price, 구동계):
super().__init__(wheel, price) #car.__init__(self, wheel, price)
self.구동계 = 구동계
bicycle = bike(2, 100, "시마노")
print(bicycle.구동계)
print(bicycle.wheel) | [
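# (Added translation note) The Korean header comment means "class
# inheritance"; the attribute name 구동계 means "drivetrain" and the
# string "시마노" is the brand Shimano. Running this file prints:
#   시마노
#   2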
"[email protected]"
] | |
91eaecf9d7e01d9e1e6daaa5154fc75664696fdd | 34b9b39442bde1a3c8fa670ef60bcc84d772a067 | /Assignment 6-Pandas A-Deadline Oct 31 2017/Assigment6_Marrugo/Assignment6_step1_Marrugo.py | 18084c7507efa63f8f69b9823d9f478cea967cc6 | [] | no_license | bnajafi/Scientific_Python_Assignments_POLIMI_EETBS | b398fc2754b843d63cd06d517235c16177a87dcf | 8da926e995dcaf02a297c6bb2f3120c49d6d63da | refs/heads/master | 2021-05-07T22:36:14.715936 | 2018-01-16T21:12:33 | 2018-01-16T21:12:33 | 107,265,075 | 38 | 86 | null | 2018-01-16T21:12:34 | 2017-10-17T12:24:04 | Python | UTF-8 | Python | false | false | 2,890 | py | # -*- coding: utf-8 -*-
# Assigment 6 Calculation of the example D using pandas
print "Assigment 5 Calculation of the example D using pandas\n"
# import library
import pandas as pd
#Convention resistance [Heat transfer coefficient, area]
Resistances_names=["R1in","R2","R3","R4","R5","R6","R7","R8out"]#Resistances names of the wall
Resistances_columns=["Type","Config","L","H","K","A","RValue"]
# Definition of resistances
R1in=["conv","Series",None,10,None,0.25,0]
R2=["cond","Series",0.03,None,0.026,0.25,0]
R3=["cond","Series",0.02,None,0.22,0.25,0]
R4=["cond","Parallel",0.16,None,0.22,0.015,0]
R5=["cond","Parallel",0.16,None,0.22,0.015,0]
R6=["cond","Parallel",0.16,None,0.72,0.22,0]
R7=["cond","Series",0.02,None,0.22,0.25,0]
R8out=["conv","Series",None,25,None,0.25,0]
#Creation of a 2D array
Resistances_Df=pd.DataFrame([R1in,R2,R3,R4,R5,R6,R7,R8out],index=Resistances_names,columns=Resistances_columns)
#Resistances_RValues= np.array(np.zeros(8))# Variable for store the resistances values
#Calculation of the conductive resistances
Resistances_Df["RValue"][Resistances_Df["Type"]=="cond"] = (Resistances_Df["L"][Resistances_Df["Type"]=="cond"])/((Resistances_Df["K"][Resistances_Df["Type"]=="cond"])*(Resistances_Df["A"][Resistances_Df["Type"]=="cond"]))
#Calculation of the convective resistances
Resistances_Df["RValue"][Resistances_Df["Type"]=="conv"] = 1.0 / ((Resistances_Df["A"][Resistances_Df["Type"]=="conv"])*(Resistances_Df["H"][Resistances_Df["Type"]=="conv"]))
#Total convection resistance
Resistances_convection=Resistances_Df["RValue"][Resistances_Df["Type"]=="conv"].sum()
#Total conduction resistances in series
Resistances_Series_conduction=(Resistances_Df["RValue"][Resistances_Df["Config"]=="Series"][Resistances_Df["Type"]=="cond"].sum())
#Calculation of the parallel resistances
Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"]=1/(Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"])
#Total conduction resistances in parallel
Resistances_Parallel_conduction=1/(Resistances_Df["RValue"][Resistances_Df["Config"]=="Parallel"].sum())
#Total resistance
R_total=Resistances_Series_conduction + Resistances_Parallel_conduction + Resistances_convection
wall=[20,-10,3,5,0.25]#wall inputs [Temperature in, Temperature out, high, wide, area ]
Qb=(wall[0]-wall[1])/R_total# Rate of heat transfer of one brick in [W]
Nb=(wall[2]*wall[3])/wall[4]# Number of bricks in the wall
Qtotal=Qb*Nb# Rate of heat tranfer of the wall in [W]
print "The total convenction resistance is ",Resistances_convection,"ºC/W \n"
print "The total conduction resistance in series is ",Resistances_Series_conduction,"ºC/W \n"
print "The total conduction resistance in parallel is ",Resistances_Parallel_conduction,"ºC/W \n"
print "The total thermal resistance is ",R_total,"ºC/W \n"
print "The heat transfer through the wall is "+str(Qtotal)+" W"
| [
"[email protected]"
] | |
3a9e841815a714e6b2cd118666d6904c176e05bd | 9d0195aa83cc594a8c61f334b90375961e62d4fe | /JTTest/SL7/CMSSW_10_2_15/src/dataRunA/nano4078.py | 10a232d6477959a0e9f45a7e843a5e600466e3b4 | [] | no_license | rsk146/CMS | 4e49592fc64f6438051544c5de18598db36ed985 | 5f8dab8c59ae556598b9747b52b88205fffc4dbe | refs/heads/master | 2022-12-01T03:57:12.126113 | 2020-08-04T03:29:27 | 2020-08-04T03:29:27 | 284,863,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,293 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: nanoAOD_jetToolbox_cff -s NANO --data --eventcontent NANOAOD --datatier NANOAOD --no_exec --conditions 102X_dataRun2_Sep2018Rereco_v1 --era Run2_2018,run2_nanoAOD_102Xv1 --customise_commands=process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False))) --customise JMEAnalysis/JetToolbox/nanoAOD_jetToolbox_cff.nanoJTB_customizeMC --filein /users/h2/rsk146/JTTest/SL7/CMSSW_10_6_12/src/ttbarCutTest/dataReprocessing/0004A5E9-9F18-6B42-B31D-4206406CE423.root --fileout file:jetToolbox_nano_datatest.root
import FWCore.ParameterSet.Config as cms
from Configuration.StandardSequences.Eras import eras
process = cms.Process('NANO',eras.Run2_2018,eras.run2_nanoAOD_102Xv1)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_AutoFromDBCurrent_cff')
process.load('PhysicsTools.NanoAOD.nano_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring('file:root://cms-xrd-global.cern.ch//store/data/Run2018A/EGamma/MINIAOD/17Sep2018-v2/100000/ECB3D11D-1624-E04A-ADB1-80D17F4B7352.root'),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('nanoAOD_jetToolbox_cff nevts:1'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODoutput = cms.OutputModule("NanoAODOutputModule",
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAOD'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:jetToolbox_nano_datatest4078.root'),
outputCommands = process.NANOAODEventContent.outputCommands
)
# Additional output definition
# Other statements
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '102X_dataRun2_Sep2018Rereco_v1', '')
# Path and EndPath definitions
process.nanoAOD_step = cms.Path(process.nanoSequence)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODoutput_step = cms.EndPath(process.NANOAODoutput)
# Schedule definition
process.schedule = cms.Schedule(process.nanoAOD_step,process.endjob_step,process.NANOAODoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nano_cff
from PhysicsTools.NanoAOD.nano_cff import nanoAOD_customizeData
#call to customisation function nanoAOD_customizeData imported from PhysicsTools.NanoAOD.nano_cff
process = nanoAOD_customizeData(process)
# Automatic addition of the customisation function from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff import nanoJTB_customizeMC
#call to customisation function nanoJTB_customizeMC imported from JMEAnalysis.JetToolbox.nanoAOD_jetToolbox_cff
process = nanoJTB_customizeMC(process)
# End of customisation functions
# Customisation from command line
process.add_(cms.Service('InitRootHandlers', EnableIMT = cms.untracked.bool(False)))
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion | [
"[email protected]"
] | |
8e71bfb294c0824e57c51307c343248ff48ae18a | addbf46c371f7d3cb51dfce6d118da8e0fd8c1f2 | /nathan-programming-puzzles-10-2-a.py | 5d53a2e9e28239d972ee3fd494170d25d1341682 | [] | no_license | nrhint/python-class | 2a5af35fc887eeb6668278d40b86b5be872ee4c4 | c70940b5e03b1858d2f6f16be6807206ec3e22bb | refs/heads/master | 2020-12-07T00:37:52.907516 | 2016-12-07T00:50:52 | 2016-12-07T00:50:52 | 67,438,180 | 0 | 0 | null | 2016-09-05T17:09:27 | 2016-09-05T17:09:27 | null | UTF-8 | Python | false | false | 154 | py | import pickle
faves = {
"food", "games", "programming"
}
favesFile = open("favorits.dat", 'wb')
pickle.dump(faves, favesFile)
favesFile.close()
| [
"[email protected]"
] | |
27d53f6361992e5a1a8759acb0c55160f5ab5cb1 | 07c3034f7b6ef88e08430b8c908613ea0091f1b6 | /Homework/HW1/hc2324_hw1_q3.py | 8c4604900546d1f073f205bf4eca98d9a5734f9d | [] | no_license | HelalChow/Data-Structures | 494b8fabcdf1cac20db78055547ce4160ad6a018 | 3b3401cbd23e01b2d7d95dfc3b95451ca179cee9 | refs/heads/master | 2021-10-19T00:24:23.589224 | 2019-02-15T22:34:05 | 2019-02-15T22:34:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 358 | py | def a_sum_square(n):
sum=0
for i in range(n):
sum+=i**2
return sum
def b_sum_square(n):
return sum(i**2 for i in range(n))
def c_sum_square(n):
sum = 0
for i in range(n):
if i%2 != 0:
sum += i**2
return sum
def d_sum_square(n):
return sum(i**2 for i in range(n) if i%2 != 0)
| [
"[email protected]"
] | |
964100020297da1c3078fb8f0be88a105eaf54a7 | 46128b87bf516e34c2844b7a2de37606c1381319 | /backend/apps/crowd_bt/types.py | 78e1a4852e6817f06d399bb0ee8f67c8685f3450 | [] | no_license | nxxtlib/votai | 0f9848ef64375ee2beb07e38d009bdf8360c63ed | b8907b23190c1e164d0130538e90356a11f43534 | refs/heads/master | 2022-11-24T18:44:51.416348 | 2020-06-01T22:01:08 | 2020-06-01T22:01:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,631 | py | from typing import NewType, NamedTuple
# pylint: disable=pointless-string-statement
"""Mu
Given a relevance score (s), its Gaussian-distributed format is given by:
s ~ N(μ, σ²)
"""
Mu = NewType("Mu", float)
"""Sigma Squared
Given a relevance score (s), its Gaussian-distributed format is given by:
s ~ N(μ, σ²)
"""
SigmaSquared = NewType("SigmaSquared", float)
"""Alpha
Given the probability that the annotator agrees with the true pairwise preferences (η),
we assume it to be a Beta-distributed random variable with parameters α and β:
η ∼ Beta(α, β)
"""
Alpha = NewType("Alpha", float)
"""Beta
Given the probability that the annotator agrees with the true pairwise preferences (η),
we assume it to be a Beta-distributed random variable with parameters α and β:
η ∼ Beta(α, β)
"""
Beta = NewType("Beta", float)
"""Normalization Constant
Normalization constant used to regulate the expected information gain.
"""
C = NewType("C", float) # pylint: disable=invalid-name
class RelevanceScore(NamedTuple):
"""Relevance Score (s)
Score that evaluates how relevant an item is according to annotators
Represented as a Gaussian-distributed random variable with parameters μ and σ²
such that:
s ~ N(μ, σ²)
"""
mu: Mu
sigma_squared: SigmaSquared
class AnnotatorConfidence(NamedTuple):
"""Annotator Confidence (η)
Probability that an annotator agrees with the true pairwise preference
Represented as a Beta-distributed random variable with parameters α and β,
such that:
η ~ Beta(α, β)
"""
alpha: Alpha
beta: Beta
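# Illustrative usage (added; the prior values below are assumptions, not
# taken from this module):
#   prior_score = RelevanceScore(Mu(0.0), SigmaSquared(1.0))      # s ~ N(0, 1)
#   prior_conf = AnnotatorConfidence(Alpha(10.0), Beta(1.0))      # eta ~ Beta(10, 1)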
| [
"[email protected]"
] | |
31fbf2cea496cbee545c55ace24d1fbf333cb2ee | 52e7f32f1d9cff522d76583036735ddd27cd9f7a | /pjs/scoping.py | 1127f1febebe38098d14ec04302346b69397eb87 | [] | no_license | niallo/PJs | 5f8167610a312f249d5f6d64287ee244be29dcf3 | e0adb313559774fb1798f56f03aea2e3f0abfd3b | refs/heads/master | 2021-01-16T20:56:57.175927 | 2013-07-13T21:11:54 | 2013-07-13T21:11:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,421 | py | import ast
from converter import register as converts, PJsNotImplemented
import utils
FUNC_TEMPLATE = '''\
%(left)s = %(dec_front)s$def(%(special)sfunction $_%(name)s(%(args)s) { // %(lineno)d
%(contents)s
})%(dec_back)s;
%(rname)s.__module__ = _.__name__;
%(rname)s.__name__ = $b.str("%(name)s");
'''
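# For orientation (added): with name='greet', args='x', lineno=3 and empty
# special/dec_front/dec_back, FUNC_TEMPLATE expands roughly to:
#   _.greet = $def(function $_greet(x) { // 3
#   ...body...
#   });
#   _.greet.__module__ = _.__name__;
#   _.greet.__name__ = $b.str("greet");
# ('_.greet' stands in for whatever lhand_assign/resolve actually produce.)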
LAMBDA_TEMPLATE = '''\
$def(%(special)sfunction $_lambda(%(args)s) {return %(contents)s;})'''
CLASS_TEMPLATE = '''\
%(left)s = %(dec_front)sClass('%(name)s', [%(bases)s], (function(){
var __%(lnum)s = {};
%(contents)s
return __%(lnum)s;
}()))%(dec_back)s;
%(rname)s.__module__ = _.__name__;
'''
@converts(ast.FunctionDef)
def functiondef(conv, node, scope):
dct = {
'name': node.name,
'lineno': node.lineno,
'special': function_special(conv, node, scope),
'left': utils.lhand_assign(node.name, scope),
'rname': utils.resolve(node.name, scope),
}
args = function_args(conv, node, scope)
dct['args'] = ', '.join(args)
dct['dec_front'] = ''
dct['dec_back'] = ''
for dec in node.decorator_list:
dct['dec_front'] += conv.convert_node(dec, scope) + '('
dct['dec_back'] += ')'
scope = scope.copy()
scope.explicit_locals = False
scope.locals += args
dct['contents'] = utils.fix_undef(conv.convert_block(node.body, scope), scope)
return FUNC_TEMPLATE % dct
def function_args(conv, node, scope):
args = list(arg.id for arg in node.args.args)
if node.args.vararg:
args.append(node.args.vararg)
if node.args.kwarg:
args.append(node.args.kwarg)
return args
def function_special(conv, node, scope):
defaults = function_defaults(conv, node, scope)
if node.args.kwarg:
return defaults + ', ' + str(bool(node.args.vararg)).lower() + ', true, '
elif node.args.vararg:
return defaults + ', true, '
elif defaults != '{}':
return defaults + ', '
else:
return ''
def function_defaults(conv, node, scope):
args = list(arg.id for arg in node.args.args)
defaults = []
for default, name in zip(reversed(node.args.defaults), reversed(args)):
defaults.append("'%s': %s" % (name, conv.convert_node(default, scope)))
return '{' + ', '.join(defaults) + '}'
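# (Added note) function_defaults pairs the ast defaults with the *last*
# positional args (standard Python semantics), hence the double reversed()
# above. For def f(a, b=1, c=2) it yields {'c': 2, 'b': 1}.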
@converts(ast.Lambda)
def lambdadef(conv, node, scope):
dct = {
'special': function_special(conv, node, scope),
}
args = function_args(conv, node, scope)
dct['args'] = ', '.join(args)
scope = scope.copy()
scope.explicit_locals = False
scope.locals += args
dct['contents'] = utils.fix_undef(conv.convert_node(node.body, scope), scope)
return LAMBDA_TEMPLATE % dct
@converts(ast.ClassDef)
def classdef(conv, node, scope):
imports = []
dct = {
'name': node.name,
'bases': ', '.join(utils.resolve(base.id, scope) for base in node.bases),
'left': utils.lhand_assign(node.name, scope),
'rname': utils.resolve(node.name, scope),
}
dct['dec_front'] = ''
dct['dec_back'] = ''
for dec in node.decorator_list:
dct['dec_front'] += conv.convert_node(dec, scope) + '('
dct['dec_back'] += ')'
scope = scope.copy()
scope.explicit_locals = True
dct['contents'] = utils.fix_undef(conv.convert_block(node.body, scope), scope)
dct['lnum'] = len(scope.parent_locals)
return CLASS_TEMPLATE % dct
# vim: et sw=4 sts=4
| [
"[email protected]"
] | |
3cdece2e48e8bed2a644484e138ec349ae54e1ab | 82bab97dc70cad2e8a64c9563eb36694899683b0 | /launcher.py | 3d579fd84cf1d0a949f0dba7a5fb6f329e90bd83 | [] | no_license | lubusax/ras_1901 | a2a0297c5751d9bd2cc10d5790a67df76779580c | 7f4a590f7e2b9b70c47e2c4ec7615f2aca91f57a | refs/heads/master | 2020-04-16T02:59:00.048554 | 2019-01-20T18:40:49 | 2019-01-20T18:40:49 | 165,215,773 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,501 | py | #! /usr/bin/python3.5
import os, sys, time
from dicts.ras_dic import WORK_DIR, PinsBuzzer, PinsDown, PinsOK
from lib import Display, CardReader, PasBuz, Button
from lib import Odooxlm, Tasks
Buz = PasBuz.PasBuz( PinsBuzzer )
Disp = Display.Display()
Reader = CardReader.CardReader()
B_Down = Button.Button( PinsDown )
B_OK = Button.Button( PinsOK )
Hardware = [ Buz, Disp, Reader, B_Down, B_OK]
Odoo = Odooxlm.Odooxlm() # communicate via xlm
Tasks = Tasks.Tasks( Odoo, Hardware ) # note: rebinds the imported Tasks module name to this instance
def ask_twice():
# user asked twice before executing -'are you sure?'
Buz.Play('OK')
Disp.display_msg('sure?')
B_OK.pressed = False # avoid false positives
B_Down.pressed = False
time.sleep(0.4) # allow time to take the finger
# away from the button
while not ( B_OK.pressed or B_Down.pressed): #wait answer
B_Down.scanning()
B_OK.scanning()
if B_OK.pressed: # OK pressed for a second time
Tasks.selected() # The selected Task is run.
# When the Admin Card is swiped
# the Program returns here again.
else:
Buz.Play('down')
time.sleep(0.4) # allow time to take the finger
# away from the button
B_OK.pressed = False # avoid false positives
B_Down.pressed = False
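# (Added note) ask_twice() implements a press-OK-twice confirmation: a
# second OK runs the selected task, any Down press cancels, and the
# 0.4 s sleeps debounce the physical buttons between reads.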
def main_loop():
# The Main Loop only ends when the option to reboot is chosen.
# In all the Tasks, when the Admin Card is swiped,
# the program returns to this Loop, where a new Task
# can be selected using the OK and Down Buttons.
Disp.initial_display()
if not Tasks.wifi_active(): # make sure that the Terminal is
Tasks.reset_wifi() # connected to a WiFi
if not Odoo.uid: # make sure that we have
Tasks.reset_odoo() # access to an odoo db
Tasks.selected() # when the terminal is switched on it goes
# to the predefined Task (begin_option)
    while not Tasks.reboot:
Disp.display_msg( Tasks.option_name() )
if B_OK.pressed:
if (Tasks.option_name() in Tasks.ask_twice):
ask_twice()
else:
Tasks.selected()
elif B_Down.pressed:
Tasks.down()
B_Down.scanning() # If no Button was Pressed
B_OK.scanning() # continue scanning
Disp.display_msg('shut_down')
time.sleep(1.5)
Disp.clear_display()
# os.system('sudo reboot')
main_loop()
| [
"[email protected]"
] | |
d8ea0a8e9fe2cd0625dc5ac26cddd66c3fed3058 | 00c6ded41b84008489a126a36657a8dc773626a5 | /.history/Sizing_Method/ConstrainsAnalysis/DesignPointSelectStrategy_20210714184448.py | c1f95465cbf3857a24980aee211e2520759b0265 | [] | no_license | 12libao/DEA | 85f5f4274edf72c7f030a356bae9c499e3afc2ed | 1c6f8109bbc18c4451a50eacad9b4dedd29682bd | refs/heads/master | 2023-06-17T02:10:40.184423 | 2021-07-16T19:05:18 | 2021-07-16T19:05:18 | 346,111,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,588 | py | # author: Bao Li #
# Georgia Institute of Technology #
import sys
import os
sys.path.insert(0, os.getcwd())
import numpy as np
import matplotlib.pylab as plt
import Sizing_Method.Other.US_Standard_Atmosphere_1976 as atm
import Sizing_Method.Aerodynamics.ThrustLapse as thrust_lapse
import Sizing_Method.Aerodynamics.Aerodynamics as ad
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysis as ca
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPD as ca_pd
import Sizing_Method.ConstrainsAnalysis.ConstrainsAnalysisPDP1P2 as ca_pd_12
from scipy.optimize import curve_fit
"""
All units are SI (International System of Units) unless noted otherwise.
"""
class Design_Point_Select_Strategy:
"""This is a design point select strategy from constrains analysis"""
def __init__(self, altitude, velocity, beta, method, p_turbofan_max, p_motorfun_max, n=12):
"""
:param altitude: m x 1 matrix
:param velocity: m x 1 matrix
:param beta: P_motor/P_total m x 1 matrix
:param p_turbofan_max: maximum propulsion power for turbofan (threshold value)
:param p_motorfun_max: maximum propulsion power for motorfun (threshold value)
:param n: number of motor
the first group of condition is for stall speed
the stall speed condition have to use motor, therefore with PD
:return:
power load: design point p/w and w/s
"""
self.h = altitude
self.v = velocity
self.beta = beta
self.n_motor = n
self.p_turbofan_max = p_turbofan_max
self.p_motorfun_max = p_motorfun_max
# initialize the p_w, w_s, hp, n, m
self.n = 100
self.m = len(self.h)
self.hp = np.linspace(0, 1, self.n)
self.hp_threshold = self.p_motorfun_max / (self.p_motorfun_max + self.p_turbofan_max)
# method1 = Mattingly_Method, method2 = Gudmundsson_Method
if method == 1:
self.method1 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Mattingly_Method_with_DP_electric
else:
self.method1 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_turbofun
self.method2 = ca_pd_12.ConstrainsAnalysis_Gudmundsson_Method_with_DP_electric
        # NOTE (assumed fix): the snapshot calls self.method, which is never
        # defined; per the docstring the stall-speed condition is flown on
        # the motor, so the electric variant method2 is used here.
        problem = self.method2(self.h[0], self.v[0], self.beta[0], 6000, self.hp_threshold)
self.w_s = problem.allFuncs[0](problem)
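        # (Added note) allFuncs[0] is the stall-speed constraint; it fixes
        # the design wing loading w_s once here, and every later constraint
        # in p_w_compute() is evaluated at that w_s.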
def p_w_compute(self):
self.p_w = np.zeros([self.m, self.n]) # m x n matrix
for i in range(1, 8):
for j in range(self.n):
problem1 = self.method1(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
problem2 = self.method2(self.h[i], self.v[i],
self.beta[i], self.w_s, self.hp[j])
if i >= 5:
p_w_1 = problem1.allFuncs[-1](problem1, roc=15 - 5 * (i - 5))
p_w_2 = problem2.allFuncs[-1](problem2, roc=15 - 5 * (i - 5))
else:
p_w_1 = problem1.allFuncs[i](problem1)
p_w_2 = problem2.allFuncs[i](problem2)
if p_w_1 > self.p_turbofan_max:
p_w_1 = 100000
elif p_w_2 > self.p_motorfun_max:
p_w_2 = 100000
self.p_w[i, j] = p_w_1 + p_w_2
return self.p_w
def strategy(self):
#find the min p_w for difference hp for each flight condition:
        # The .history snapshot breaks off mid-statement here; below is a
        # minimal assumed completion that picks, for each populated flight
        # condition, the power split hp minimizing the combined loading.
        p_w = self.p_w_compute()
        best_j = np.argmin(p_w[1:8], axis=1)  # rows 1..7 are filled above
        return self.hp[best_j]
| [
"[email protected]"
] | |
b93fcfdfac7b65cb81f229e57d46e240ab834093 | 920ab19b73a7cba21d340a49d9d24e2d1eeabf3d | /idps/lib/python3.7/site-packages/identify/extensions.py | e7aa969e273222d1c0e97ee9afa785c23c756811 | [
"MIT"
] | permissive | DTrafford/IDPS | 5fa2b73f2c47cbf50b90a1a786c10f7d69c995b4 | 1eaccfc218adcb7231e64271731c765f8362b891 | refs/heads/master | 2022-12-16T16:28:34.801962 | 2020-03-30T18:08:09 | 2020-03-30T18:08:09 | 234,163,829 | 0 | 0 | MIT | 2020-09-10T06:26:02 | 2020-01-15T20:10:09 | Python | UTF-8 | Python | false | false | 6,615 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import unicode_literals
EXTENSIONS = {
'apinotes': {'text', 'apinotes'},
'asar': {'binary', 'asar'},
'bash': {'text', 'shell', 'bash'},
'bat': {'text', 'batch'},
'bmp': {'binary', 'image', 'bitmap'},
'bz2': {'binary', 'bzip2'},
'c': {'text', 'c'},
'cc': {'text', 'c++'},
'cu': {'text', 'cuda'},
'cfg': {'text'},
'cmake': {'text', 'cmake'},
'cnf': {'text'},
'coffee': {'text', 'coffee'},
'conf': {'text'},
'cpp': {'text', 'c++'},
'crt': {'text', 'pem'},
'cs': {'text', 'c#'},
'cson': {'text', 'cson'},
'css': {'text', 'css'},
'csv': {'text', 'csv'},
'cxx': {'text', 'c++'},
'dart': {'text', 'dart'},
'def': {'text', 'def'},
'dtd': {'text', 'dtd'},
'ear': {'binary', 'zip', 'jar'},
'ejs': {'text', 'ejs'},
'eot': {'binary', 'eot'},
'eps': {'binary', 'eps'},
'erb': {'text', 'erb'},
'exe': {'binary'},
'eyaml': {'text', 'yaml'},
'feature': {'text', 'gherkin'},
'fish': {'text', 'fish'},
'gemspec': {'text', 'ruby'},
'gif': {'binary', 'image', 'gif'},
'go': {'text', 'go'},
'gotmpl': {'text', 'gotmpl'},
'gpx': {'text', 'gpx', 'xml'},
'gradle': {'text', 'groovy'},
'groovy': {'text', 'groovy'},
'gyb': {'text', 'gyb'},
'gyp': {'text', 'gyp', 'python'},
'gypi': {'text', 'gyp', 'python'},
'gz': {'binary', 'gzip'},
'h': {'text', 'header', 'c', 'c++'},
'hpp': {'text', 'header', 'c++'},
'htm': {'text', 'html'},
'html': {'text', 'html'},
'hxx': {'text', 'header', 'c++'},
'icns': {'binary', 'icns'},
'ico': {'binary', 'icon'},
'ics': {'text', 'icalendar'},
'idl': {'text', 'idl'},
'inc': {'text', 'inc'},
'ini': {'text', 'ini'},
'j2': {'text', 'jinja'},
'jade': {'text', 'jade'},
'jar': {'binary', 'zip', 'jar'},
'java': {'text', 'java'},
'jenkinsfile': {'text', 'groovy'},
'jinja': {'text', 'jinja'},
'jinja2': {'text', 'jinja'},
'jpeg': {'binary', 'image', 'jpeg'},
'jpg': {'binary', 'image', 'jpeg'},
'js': {'text', 'javascript'},
'json': {'text', 'json'},
'jsonnet': {'text', 'jsonnet'},
'jsx': {'text', 'jsx'},
'key': {'text', 'pem'},
'kml': {'text', 'kml', 'xml'},
'kt': {'text', 'kotlin'},
'less': {'text', 'less'},
'lua': {'text', 'lua'},
'm': {'text', 'c', 'objective-c'},
'manifest': {'text', 'manifest'},
'map': {'text', 'map'},
'markdown': {'text', 'markdown'},
'md': {'text', 'markdown'},
'mib': {'text', 'mib'},
'mk': {'text', 'makefile'},
'mm': {'text', 'c++', 'objective-c++'},
'modulemap': {'text', 'modulemap'},
'ngdoc': {'text', 'ngdoc'},
'nim': {'text', 'nim'},
'nims': {'text', 'nim'},
'nimble': {'text', 'nimble'},
'nix': {'text', 'nix'},
'otf': {'binary', 'otf'},
'p12': {'binary', 'p12'},
'patch': {'text', 'diff'},
'pdf': {'binary', 'pdf'},
'pem': {'text', 'pem'},
'php': {'text', 'php'},
'php4': {'text', 'php'},
'php5': {'text', 'php'},
'phtml': {'text', 'php'},
'pl': {'text', 'perl'},
'plantuml': {'text', 'plantuml'},
'png': {'binary', 'image', 'png'},
'po': {'text', 'pofile'},
'pp': {'text', 'puppet'},
'properties': {'text', 'java-properties'},
'proto': {'text', 'proto'},
'purs': {'text', 'purescript'},
'py': {'text', 'python'},
'pyi': {'text', 'pyi'},
'pyx': {'text', 'cython'},
'pxd': {'text', 'cython'},
'pxi': {'text', 'cython'},
'r': {'text', 'r'},
'rb': {'text', 'ruby'},
'rs': {'text', 'rust'},
'rst': {'text', 'rst'},
's': {'text', 'asm'},
'sbt': {'text', 'sbt', 'scala'},
'sc': {'text', 'scala'},
'scala': {'text', 'scala'},
'scss': {'text', 'scss'},
'sh': {'text', 'shell'},
'sls': {'text', 'salt'},
'so': {'binary'},
'sol': {'text', 'solidity'},
'spec': {'text', 'spec'},
'styl': {'text', 'stylus'},
'sql': {'text', 'sql'},
'svg': {'text', 'svg'},
'swf': {'binary', 'swf'},
'swift': {'text', 'swift'},
'swiftdeps': {'text', 'swiftdeps'},
'tac': {'text', 'twisted', 'python'},
'tar': {'binary', 'tar'},
'tgz': {'binary', 'gzip'},
'thrift': {'text', 'thrift'},
'tiff': {'binary', 'image', 'tiff'},
'toml': {'text', 'toml'},
'tf': {'text', 'terraform'},
'ts': {'text', 'ts'},
'tsx': {'text', 'tsx'},
'ttf': {'binary', 'ttf'},
'txt': {'text', 'plain-text'},
'vdx': {'text', 'vdx'},
'vim': {'text', 'vim'},
'vue': {'text', 'vue'},
'war': {'binary', 'zip', 'jar'},
'wav': {'binary', 'audio', 'wav'},
'wkt': {'text', 'wkt'},
'whl': {'binary', 'wheel', 'zip'},
'woff': {'binary', 'woff'},
'woff2': {'binary', 'woff2'},
'wsgi': {'text', 'wsgi', 'python'},
'xml': {'text', 'xml'},
'xq': {'text', 'xquery'},
'xql': {'text', 'xquery'},
'xqm': {'text', 'xquery'},
'xqu': {'text', 'xquery'},
'xquery': {'text', 'xquery'},
'xqy': {'text', 'xquery'},
'xsd': {'text', 'xml', 'xsd'},
'xsl': {'text', 'xml', 'xsl'},
'yaml': {'text', 'yaml'},
'yang': {'text', 'yang'},
'yin': {'text', 'xml', 'yin'},
'yml': {'text', 'yaml'},
'zig': {'text', 'zig'},
'zip': {'binary', 'zip'},
'zsh': {'text', 'shell', 'zsh'},
}
EXTENSIONS_NEED_BINARY_CHECK = {
'plist': {'plist'},
}
NAMES = {
'.babelrc': {'text', 'json', 'babelrc'},
'.bowerrc': {'text', 'json', 'bowerrc'},
'.coveragerc': {'text', 'ini', 'coveragerc'},
'.dockerignore': {'text', 'dockerignore'},
'.editorconfig': {'text', 'editorconfig'},
'.gitattributes': {'text', 'gitattributes'},
'.gitignore': {'text', 'gitignore'},
'.gitmodules': {'text', 'gitmodules'},
'.jshintrc': {'text', 'json', 'jshintrc'},
'.mailmap': {'text', 'mailmap'},
'.mention-bot': {'text', 'json', 'mention-bot'},
'.npmignore': {'text', 'npmignore'},
'.yamllint': {'text', 'yaml', 'yamllint'},
'AUTHORS': EXTENSIONS['txt'],
'CMakeLists.txt': EXTENSIONS['cmake'],
'COPYING': EXTENSIONS['txt'],
'Dockerfile': {'text', 'dockerfile'},
'Gemfile': EXTENSIONS['rb'],
'Jenkinsfile': {'text', 'groovy'},
'LICENSE': EXTENSIONS['txt'],
'MAINTAINERS': EXTENSIONS['txt'],
'Makefile': EXTENSIONS['mk'],
'NOTICE': EXTENSIONS['txt'],
'PATENTS': EXTENSIONS['txt'],
'Pipfile': EXTENSIONS['toml'],
'Pipfile.lock': EXTENSIONS['json'],
'README': EXTENSIONS['txt'],
'Rakefile': EXTENSIONS['rb'],
'setup.cfg': EXTENSIONS['ini'],
}
| [
"[email protected]"
] | |
66e2bb71af508d27ce94ce064013eb5f466c0f3e | 88e06bab1989c81a2dd649bb09b144fa7c958f89 | /leet_construct_binary_tree_from_preorder_and_inorder.py | 8cb4b670c2f1f3cd6c7fe8901450b6acf460ad69 | [] | no_license | VaibhavD143/Coding | 4499526b22ee4ef13f66c3abcea671c80a8f748a | 5de3bae8891c7d174cbc847a37c3afb00dd28f0e | refs/heads/master | 2023-08-06T21:56:44.934954 | 2021-10-09T18:31:29 | 2021-10-09T18:31:29 | 263,890,286 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | """
To understand base:
take example
[8,5,2,3,4,6,7,9]
[3,2,4,5,7,6,9,8]
[3,9,20,15,7]
[9,3,15,20,7]
"""
# Definition for a binary tree node.
class TreeNode:
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution:
def buildTree(self, preorder: List[int], inorder: List[int]) -> TreeNode:
ind = {val:i for i,val in enumerate(inorder)}
def rec(ps,pe,base):
if ps == pe:
return TreeNode(preorder[ps])
if ps>pe:
return None
root = TreeNode(preorder[ps])
i = ind[preorder[ps]] #position in inorder
diff = i-base #number of elements in left tree of root
root.left = rec(ps+1,ps+diff,base)
root.right = rec(ps+diff+1,pe,i+1)
return root
return rec(0,len(preorder)-1,0) | [
"[email protected]"
] | |
774ed71b32ee8d05b954da997e212641803eb3da | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/otherforms/_inkier.py | 178f6bd2151eec1fdf19af11edad7fd9981faaa8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py |
#calss header
class _INKIER():
def __init__(self,):
self.name = "INKIER"
self.definitions = inky
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.basic = ['inky']
| [
"[email protected]"
] | |
2ea765de2d7772e866332a742a8c10c8d9f383cc | f7c07caa1210d2a08e8433cdd854b1232efa88e3 | /Strings/Examples/String-Length.py | dd3c0d71a2a46ce1895db65abea948a473ec16b9 | [] | no_license | rchicoli/ispycode-python | c2fbecc28bf32933150986d24f77b7297f50b78e | fa27f2377943ac2e4d983065406578151091e3f5 | refs/heads/master | 2020-03-20T11:34:59.698618 | 2018-06-14T21:14:02 | 2018-06-14T21:14:02 | 137,407,150 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 58 | py |
str = 'Hello World'
length = len(str)
print(length)
| [
"[email protected]"
] | |
cd27aa59e79f26edb2200917c400319d78be84be | f32ea412120038e2f3e745c9331595aac855556f | /mayaTools/cgm/core/cgmPy/os_Utils.py | 6ea1cd1cf8f2b3befee3f1cfb766a5c0dcbc4ff7 | [
"BSD-3-Clause"
] | permissive | liudger/cgmTools | d507d2415ce40494d7fc3193d7c2d9e6f5496a28 | 4ab51c1ee37752b91311fd8809405f36fce6e90a | refs/heads/master | 2020-12-10T08:03:11.096502 | 2019-05-08T14:05:27 | 2019-05-08T14:05:27 | 233,541,539 | 1 | 0 | null | 2020-01-13T07:57:48 | 2020-01-13T07:57:47 | null | UTF-8 | Python | false | false | 13,037 | py | """
os_Utils
Josh Burton (under the supervision of David Bokser:)
www.cgmonks.com
1/12/2011
Key:
1) Class - Limb
Creates our rig objects
2)
"""
# From Python =============================================================
import re
import os
import stat
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
# From Maya =============================================================
import maya.cmds as mc
import maya.mel as MEL
# From cgm ==============================================================
from cgm.core.cgmPy import validateArgs as cgmValid
from cgm.core.cgmPy import path_Utils as cgmPath
PATH = cgmPath
from cgm.core import cgm_General as cgmGen
#from cgm.lib.zoo.zooPy.path import Path
#import cgm.lib.zoo.zooPy.path as zooPath
#reload(zooPath)
#>>> Utilities
#===================================================================
def get_lsFromPath(str_path = None,
matchArg = None,
calledFrom = None,
removeInit = True, **kwargs):
"""
Return files or folders of a specific type from a given path
:parameters:
str_path | str
The base file path
matchArg | str
Type of file or folder to be returned.
:returns:
result
:raises:
TypeError | if 'str_path' is not a string
ValueError | if 'str_path' is a recognized dir path
TypeError | if 'matchArg' is not a string
"""
def prepReturn(result,removeInit):
for r in result:
if '__init__' in r:
result.remove(r)
return result
log.debug("get_lsFromPath str_path = {1} | matchArg={0}".format(matchArg,str_path))
_str_funcRoot = 'get_lsFromPath'
if calledFrom: _str_funcName = "{0}.{1}({2})".format(calledFrom,_str_funcRoot,matchArg)
else:_str_funcName = "{0}({1})".format(_str_funcRoot,matchArg)
result = None
#>> Check the str_path
if not isinstance(str_path, basestring):
raise TypeError('path must be string | str_path = {0}'.format(str_path))
if os.path.isfile(str_path):
str_path = cgmPath.Path(str_path).up()
log.info("{0} >> passed file. using dir: {1}".format(_str_funcName,str_path))
if not os.path.isdir(str_path):
raise ValueError('path must validate as os.path.isdir | str_path = {0}'.format(str_path))
#try:#>> Check matchArg
if matchArg is not None:
if issubclass(type(matchArg),list):
_res = []
for a in matchArg:
_res.extend(find_files(str_path,a))
return _res
elif not isinstance(matchArg, basestring):
raise TypeError('matchArg must be string | matchArg: {0}'.format(matchArg))
if matchArg is None or matchArg in ['']:
return [ name for name in os.listdir(str_path) ]
#if '*.' in matchArg:
#l_buffer = matchArg.split('*')
#return [ name for name in os.listdir(str_path) if name[-3:] == matchArg.split('*')[-1]]
if matchArg.lower() in ['folder','dir']:
return [ name for name in os.listdir(str_path) if os.path.isdir(os.path.join(str_path, name)) ]
elif matchArg.lower() in ['maya files','maya']:
return [ name for name in os.listdir(str_path) if name[-3:] in ['.ma','.mb'] ]
else:
return find_files(str_path,matchArg)
#raise NotImplementedError,'matchArg handler not in | matchArg: {0}'.format(matchArg)
return result
'''
def getLibraryClips( self, library ):
clips = {presets.LOCAL: [], presets.GLOBAL: []}
possibleTypes = AnimClipPreset, PoseClipPreset
for locale, localeClips in clips.iteritems():
for dir in self._presetManager.getPresetDirs(locale):
dir += library
if not dir.exists():
continue
for f in dir.files():
for clipType in possibleTypes:
if f.hasExtension( clipType.EXT ):
localeClips.append( clipType( locale, library, f.name() ) )
'''
def returnPyFilesFromFolder():
import os
thisFile = cgmPath.Path( __file__ )
thisPath = thisFile.up()
bufferList = find_files(thisPath, '*.py')
returnList = []
for file in bufferList:
if '__' not in file:
splitBuffer = file.split('.')
returnList.append(splitBuffer[0])
if returnList:
return returnList
else:
return False
def find_files(base, pattern):
import fnmatch
import os
'''Return list of files matching pattern in base folder.'''
""" http://stackoverflow.com/questions/4296138/use-wildcard-with-os-path-isfile"""
return [n for n in fnmatch.filter(os.listdir(base), pattern) if
os.path.isfile(os.path.join(base, n))]
def get_module_data(path = None, level = None, mode = 0, cleanPyc = False):
"""
Function for walking below a given directory looking for modules to reload. It finds modules that have pyc's as
well for help in reloading. There is a cleaner on it as well to clear all pycs found.
:parameters
path(str)
level(int) - Depth to search. None means everything
mode(int)
0 - normal
1 - pycs only
self(instance): cgmMarkingMenu
cleanPyc: Delete pycs after check
:returns
_d_files,_l_ordered,_l_pycd
_d_files - dict of import key to file
_l_ordered - ordered list of module keys as found
_l_pycd - list of modules that were _pycd
"""
_str_func = 'get_module_data'
_b_debug = log.isEnabledFor(logging.DEBUG)
_path = PATH.Path(path)
_l_subs = []
_d_files = {}
_d_names = {}
_d_pycd = {}
_d_pycs = {}
_l_duplicates = []
_l_errors = []
_l_pyc = []
_l_pycd = []
_base = _path.split()[-1]
_l_ordered_list = []
log.debug("|{0}| >> Checking base: {1} | path: {2}".format(_str_func,_base,path))
_i = 0
for root, dirs, files in os.walk(path, True, None):
# Parse all the files of given path and reload python modules
_mRoot = PATH.Path(root)
_split = _mRoot.split()
_subRoot = _split[-1]
_splitUp = _split[_split.index(_base):]
log.debug("|{0}| >> On subroot: {1} | path: {2}".format(_str_func,_subRoot,root))
log.debug("|{0}| >> On split: {1}".format(_str_func,_splitUp))
_mod = False
_l_sub = []
for f in files:
key = False
_pycd = False
_long = os.path.join(root,f)
if f.endswith('.pyc'):
#name = f[:-4]
#key = f
_l_pyc.append(os.path.join(root,f))
if f.endswith('.py'):
_str_pycCheck = _long.replace('.py','.pyc')
if os.path.exists(_str_pycCheck):
_pycd = True
if f == '__init__.py':
if _i == 0:
key = _base
name = _base
else:
key = '.'.join(_splitUp)
name = _subRoot
_mod = key
else:
name = f[:-3]
if _i == 0:
key = '.'.join([_base,name])
else:
key = '.'.join(_splitUp + [name])
#log.debug("|{0}| >> found: {1}".format(_str_func,name))
if key:
if key not in _d_files.keys():
if key != _mod:_l_sub.append(key)
_d_files[key] = os.path.join(root,f)
_d_names[key] = name
_d_pycd[key] = _pycd
if _pycd:
_l_pycd.append(key)
_d_pycs[key] = _str_pycCheck
else:
_l_duplicates.append("{0} >> {1} ".format(key, os.path.join(root,f)))
"""
try:
module = __import__(name, globals(), locals(), ['*'], -1)
reload(module)
except ImportError, e:
for arg in e.args:
logger.debug(arg)
except Exception, e:
for arg in e.args:
logger.debug(arg)
# Now reload sub modules as well
for dir_name in dirs:
__reloadRecursive(
os.path.join(path, dir_name), parent_name+'.'+dir_name
)"""
if _mod:
_l_ordered_list.append(_mod)
if _l_sub:_l_ordered_list.extend(_l_sub)
if level is not None and _i >= level:break
_i +=1
if cleanPyc:
_l_failed = []
log.debug("|{0}| >> Found {1} pyc files under: {2}".format(_str_func,len(_l_pyc),path))
for _file in _l_pyc:
#for k in _l_ordered_list:
#if k in _l_pycd:
log.debug("|{0}| >> Attempting to clean pyc for: {1} ".format(_str_func,_file))
if not _file.endswith('.pyc'):
raise ValueError,"Should NOT be here"
try:
os.remove( _file )
except WindowsError, e:
try:
log.info("|{0}| >> Initial delete fail. attempting chmod... ".format(_str_func))
os.chmod( _file, stat.S_IWRITE )
os.remove( _file )
except Exception,e:
for arg in e.args:
log.error(arg)
raise RuntimeError,"Stop"
_l_pyc = []
if mode == 1:
log.info(cgmGen._str_subLine)
log.info("|{0}| >> Found {1} pyc files under: {2}".format(_str_func,len(_l_pyc),path))
for m in _l_pyc:
print(m)
return _l_pyc
if _b_debug:
cgmGen.log_info_dict(_d_files,"Files")
cgmGen.log_info_dict(_d_names,"Imports")
if _l_duplicates:
log.debug(cgmGen._str_subLine)
log.error("|{0}| >> DUPLICATE MODULES....")
for m in _l_duplicates:
if _b_debug:print(m)
log.debug("|{0}| >> Found {1} modules under: {2}".format(_str_func,len(_d_files.keys()),path))
log.debug(cgmGen._str_subLine)
log.debug("|{0}| >> Ordered MODULES....".format(_str_func))
for k in _l_ordered_list:
if _b_debug:print(k)
log.debug(cgmGen._str_subLine)
log.debug("|{0}| >> PYCD MODULES({1})....".format(_str_func,len(_l_pycd)))
for k in _l_pycd:
if _b_debug:print(k)
return _d_files, _l_ordered_list, _l_pycd
def import_file(mFile = None, namespace = None):
"""
Import a file with a list of items
"""
_str_func = 'import_file'
if not os.path.exists(mFile):
log.error("|{0}| >> File doesn't exist: '{1}'".format(_str_func,mFile))
return False
_i = 0
_name = 'IMPORT_{0}'.format(_i)
while mc.objExists(_name):
_i +=1
_name = 'IMPORT_{0}'.format(_i)
kws = {}
if namespace is not None:
kws = {'namespace':namespace}
if cgmGen.__mayaVersion__ == 11111:
if 'cat' == 'dog':
#file -import -type "mayaAscii" -ignoreVersion -ra true -mergeNamespacesOnClash false -namespace "test" -options "v=0;" -pr -importFrameRate true -importTimeRange "override" "D:/Dropbox/cgmMRS/maya/demo/mrsMakers_gettingStarted/sphere.ma";
#_str = 'file -import -pr -prompt false -options "v=0;" -gn "{0}" -gr'.format(_name)
_str = 'file -import -ignoreVersion -ra true -mergeNamespacesOnClash false -pr -options "v=0;" -gn "{0}" -gr'.format(_name)
if namespace is not None:
_str = _str + ' -namespace "{0}"'.format(namespace)
fileString = str(mFile)
l_fileString = list(fileString)
for i,v in enumerate(l_fileString):
if v == '\\':
l_fileString[i] = '/'
_str = '{0} "{1}";'.format(_str,''.join(l_fileString))
log.warning("|{0}| >> 2018 import: {1}".format(_str_func,_str))
print _str
MEL.eval(_str)
#Do not use the prompt flag!
mc.file(mFile, i = True, pr = True, force = True, gn = _name, gr = True, **kws)
_l = mc.listRelatives (_name, children = True, type='transform',fullPath=True) or []
_res = []
for c in _l:
_res.append(mc.parent(c, world = True)[0])
mc.delete(_name)
return _res
| [
"[email protected]"
] | |
8cc82b45d71fed13ba3d94a4606774d92b763412 | 1ee90596d52554cb4ef51883c79093897f5279a0 | /Sisteme/[Python]Interfață login illumina V1/root/introcreate.py | a79797deaa5a22f231b11556db856ac4d68b345d | [] | no_license | Reizonr1/metin2-adv | bf7ecb26352b13641cd69b982a48a6b20061979a | 5c2c096015ef3971a2f1121b54e33358d973c694 | refs/heads/master | 2022-04-05T20:50:38.176241 | 2020-03-03T18:20:58 | 2020-03-03T18:20:58 | 233,462,795 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,983 | py | import chr
import grp
import app
import net
import snd
import wndMgr
import systemSetting
import localeInfo
import ui
import musicInfo
import playerSettingModule
MAN = 0
WOMAN = 1
SHAPE0 = 0
SHAPE1 = 1
PAGE_COUNT = 2
SLOT_COUNT = 4
BASE_CHR_ID = 3
class CreateCharacterWindow(ui.Window):
class CharacterRenderer(ui.Window):
def OnRender(self):
grp.ClearDepthBuffer()
grp.SetGameRenderState()
grp.PushState()
grp.SetOmniLight()
screenWidth = wndMgr.GetScreenWidth()
screenHeight = wndMgr.GetScreenHeight()
newScreenWidth = float(screenWidth+30)
newScreenHeight = float(screenHeight+30)
grp.SetViewport(270.0/screenWidth, 0.0, newScreenWidth/screenWidth, newScreenHeight/screenHeight)
app.SetCenterPosition(20.0, -470.0, -100.0)
app.SetCamera(2800.0, 15.0, 180.0, 102.0)
grp.SetPerspective(10.0, newScreenWidth/newScreenHeight, 1000.0, 4000.0)
(x, y) = app.GetCursorPosition()
grp.SetCursorPosition(x, y)
chr.Deform()
chr.Render()
grp.RestoreViewport()
grp.PopState()
grp.SetInterfaceRenderState()
def __init__(self, stream):
ui.Window.__init__(self)
net.SetPhaseWindow(net.PHASE_WINDOW_CREATE, self)
self.stream = stream
def __del__(self):
net.SetPhaseWindow(net.PHASE_WINDOW_CREATE, 0)
ui.Window.__del__(self)
def Open(self):
playerSettingModule.LoadGameData("INIT")
self.reservingRaceIndex = -1
self.reservingShapeIndex = -1
self.reservingStartTime = 0
self.gender = 0
self.slot = -1
self.shape = 0
try:
dlgBoard = ui.ScriptWindow()
pythonScriptLoader = ui.PythonScriptLoader()
pythonScriptLoader.LoadScriptFile(dlgBoard, "pongo_work/characterwindow/create/createcharacterwindow.py")
except:
import exception
exception.Abort("CreateCharacterWindow.Open.LoadObject")
try:
getChild = dlgBoard.GetChild
self.btnCreate = getChild("create_button")
self.btnExit = getChild("exit_button")
self.editCharacterName = getChild("name")
self.genderButton = []
self.genderButton.append(getChild("gender_man"))
self.genderButton.append(getChild("gender_woman"))
self.char = []
self.char.append(getChild("char1"))
self.char.append(getChild("char2"))
self.char.append(getChild("char3"))
self.char.append(getChild("char4"))
self.shapeButton = []
self.shapeButton.append(getChild("shape1"))
self.shapeButton.append(getChild("shape2"))
self.backGround = getChild("BackGround")
except:
import exception
exception.Abort("CreateCharacterWindow.Open.BindObject")
self.btnCreate.SetEvent(ui.__mem_func__(self.CreateCharacter))
self.btnExit.SetEvent(ui.__mem_func__(self.CancelCreate))
self.genderButton[0].SetEvent(ui.__mem_func__(self.__SelectGender), MAN)
self.genderButton[1].SetEvent(ui.__mem_func__(self.__SelectGender), WOMAN)
self.editCharacterName.SetText("")
self.editCharacterName.SetReturnEvent(ui.__mem_func__(self.CreateCharacter))
self.editCharacterName.SetEscapeEvent(ui.__mem_func__(self.CancelCreate))
self.chrRenderer = self.CharacterRenderer()
self.chrRenderer.SetParent(self.backGround)
self.chrRenderer.Show()
self.dlgBoard = dlgBoard
self.characters = {
0 : [playerSettingModule.RACE_WARRIOR_M, playerSettingModule.RACE_ASSASSIN_M, playerSettingModule.RACE_SURA_M, playerSettingModule.RACE_SHAMAN_M],
1 : [playerSettingModule.RACE_WARRIOR_W, playerSettingModule.RACE_ASSASSIN_W, playerSettingModule.RACE_SURA_W, playerSettingModule.RACE_SHAMAN_W]}
self.char[0].SetEvent(ui.__mem_func__(self.__SelectSlot), 0)
self.char[1].SetEvent(ui.__mem_func__(self.__SelectSlot), 1)
self.char[2].SetEvent(ui.__mem_func__(self.__SelectSlot), 2)
self.char[3].SetEvent(ui.__mem_func__(self.__SelectSlot), 3)
self.shapeButton[0].SetEvent(ui.__mem_func__(self.__SelectShape), 0)
self.shapeButton[1].SetEvent(ui.__mem_func__(self.__SelectShape), 1)
self.EnableWindow()
self.__SelectSlot(app.GetRandom(0,3))
app.SetCamera(500.0, 10.0, 180.0, 95.0)
self.__SelectGender(0)
self.__SelectShape(0)
self.Show()
self.dlgBoard.Show()
if musicInfo.createMusic != "":
snd.SetMusicVolume(systemSetting.GetMusicVolume())
snd.FadeInMusic("BGM/"+musicInfo.createMusic)
app.ShowCursor()
def Close(self):
if musicInfo.createMusic != "":
snd.FadeOutMusic("BGM/"+musicInfo.createMusic)
for id in xrange(BASE_CHR_ID + SLOT_COUNT * PAGE_COUNT):
chr.DeleteInstance(id)
self.dlgBoard.Hide()
self.Hide()
app.HideCursor()
def EnableWindow(self):
self.reservingRaceIndex = -1
self.reservingShapeIndex = -1
self.reservingHairstyleIndex = -1
self.btnCreate.Enable()
self.btnExit.Enable()
self.editCharacterName.SetFocus()
self.editCharacterName.Enable()
self.genderButton[0].Enable()
self.genderButton[1].Enable()
self.shapeButton[0].Enable()
self.shapeButton[1].Enable()
self.char[0].Enable()
self.char[1].Enable()
self.char[2].Enable()
self.char[3].Enable()
for page in xrange(PAGE_COUNT):
for slot in xrange(SLOT_COUNT):
chr_id = self.__GetSlotChrID(page, slot)
chr.SelectInstance(chr_id)
chr.BlendLoopMotion(chr.MOTION_INTRO_WAIT, 0.1)
def DisableWindow(self):
self.btnCreate.Disable()
self.btnExit.Disable()
self.genderButton[0].Disable()
self.genderButton[1].Disable()
self.shapeButton[0].Disable()
self.shapeButton[1].Disable()
self.char[0].Disable()
self.char[1].Disable()
self.char[2].Disable()
self.char[3].Disable()
self.editCharacterName.Disable()
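    # Character preview instances get virtual ids laid out per gender "page":
    # ids BASE_CHR_ID..BASE_CHR_ID+3 hold the four male slots and the next
    # four hold the female slots (see __GetSlotChrID below).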
def __GetSlotChrID(self, page, slot):
return BASE_CHR_ID + page * SLOT_COUNT + slot
def __MakeCharacter(self,chr_id,race):
chr.CreateInstance(chr_id)
chr.SelectInstance(chr_id)
chr.SetVirtualID(chr_id)
chr.SetRace(race)
chr.SetArmor(0)
chr.SetHair(0)
chr.Refresh()
chr.SetMotionMode(chr.MOTION_MODE_GENERAL)
chr.SetLoopMotion(chr.MOTION_INTRO_WAIT)
chr.SetRotation(0.0)
chr.Hide()
def __SelectGender(self, gender):
for button in self.genderButton:
button.SetUp()
self.genderButton[gender].Down()
self.gender = gender
if gender == MAN:
for i in xrange(SLOT_COUNT):
chr.SelectInstance(self.__GetSlotChrID(0, i))
chr.Show()
for i in xrange(SLOT_COUNT):
chr.SelectInstance(self.__GetSlotChrID(1, i))
chr.Hide()
else:
for i in xrange(SLOT_COUNT):
chr.SelectInstance(self.__GetSlotChrID(0, i))
chr.Hide()
for i in xrange(SLOT_COUNT):
chr.SelectInstance(self.__GetSlotChrID(1, i))
chr.Show()
for id in xrange(BASE_CHR_ID + SLOT_COUNT * PAGE_COUNT):
chr.DeleteInstance(id)
chr_id = self.__GetSlotChrID(self.gender, self.slot)
self.__MakeCharacter(chr_id, self.characters[self.gender][self.slot])
self.__SelectShape(self.shape)
def __SelectShape(self, shape):
self.shape = shape
for i in xrange(len(self.shapeButton)):
self.shapeButton[i].SetUp()
self.shapeButton[shape].Down()
chr_id = self.__GetSlotChrID(self.gender, self.slot)
chr.SelectInstance(chr_id)
chr.ChangeShape(shape)
chr.SetMotionMode(chr.MOTION_MODE_GENERAL)
chr.SetLoopMotion(chr.MOTION_INTRO_WAIT)
def GetSlotIndex(self):
return self.slot
def __SelectSlot(self, slot):
if slot < 0:
return
if slot >= SLOT_COUNT:
return
if self.slot == slot:
return
self.slot = slot
if self.IsShow():
snd.PlaySound("sound/ui/click.wav")
chr_id = self.__GetSlotChrID(self.gender, slot)
for id in xrange(BASE_CHR_ID + SLOT_COUNT * PAGE_COUNT):
chr.DeleteInstance(id)
chr.SelectInstance(chr_id)
for i in xrange(len(self.char)):
self.char[i].SetUp()
self.char[slot].Down()
self.__MakeCharacter(chr_id, self.characters[self.gender][slot])
self.__SelectShape(self.shape)
self.__SelectGender(self.gender)
def CreateCharacter(self):
if -1 != self.reservingRaceIndex:
return
textName = self.editCharacterName.GetText()
if False == self.__CheckCreateCharacter(textName):
return
if musicInfo.selectMusic != "":
snd.FadeLimitOutMusic("BGM/"+musicInfo.selectMusic, systemSetting.GetMusicVolume()*0.05)
self.DisableWindow()
chr.SelectInstance(self.__GetSlotChrID(self.gender, self.slot))
chr.PushOnceMotion(chr.MOTION_INTRO_SELECTED)
self.reservingRaceIndex = chr.GetRace()
self.reservingShapeIndex = self.shape
self.reservingStartTime = app.GetTime()
def CancelCreate(self):
self.stream.SetSelectCharacterPhase()
def __CheckCreateCharacter(self, name):
if len(name) == 0:
self.PopupMessage(localeInfo.CREATE_INPUT_NAME, self.EnableWindow)
return False
if name.find(localeInfo.CREATE_GM_NAME)!=-1:
self.PopupMessage(localeInfo.CREATE_ERROR_GM_NAME, self.EnableWindow)
return False
if net.IsInsultIn(name):
self.PopupMessage(localeInfo.CREATE_ERROR_INSULT_NAME, self.EnableWindow)
return False
return True
def OnCreateSuccess(self):
self.stream.SetSelectCharacterPhase()
def OnCreateFailure(self, type):
if 1 == type:
self.PopupMessage(localeInfo.CREATE_EXIST_SAME_NAME, self.EnableWindow)
else:
self.PopupMessage(localeInfo.CREATE_FAILURE, self.EnableWindow)
def OnUpdate(self):
chr.Update()
for page in xrange(PAGE_COUNT):
for i in xrange(SLOT_COUNT):
chr.SelectInstance(self.__GetSlotChrID(page, i))
chr.Show()
if -1 != self.reservingRaceIndex:
if app.GetTime() - self.reservingStartTime >= 1.5:
chrSlot=self.stream.GetCharacterSlot()
textName = self.editCharacterName.GetText()
raceIndex = self.reservingRaceIndex
shapeIndex = self.reservingShapeIndex
net.SendCreateCharacterPacket(chrSlot, textName, raceIndex, shapeIndex, 0, 0, 0, 0)
self.reservingRaceIndex = -1
def EmptyFunc(self):
pass
def PopupMessage(self, msg, func=0):
if not func:
func=self.EmptyFunc
self.stream.popupWindow.Close()
self.stream.popupWindow.Open(msg, func, localeInfo.UI_OK)
def OnPressExitKey(self):
self.CancelCreate()
return True
| [
"[email protected]"
] | |
615cfacbf839f0821c7d576ef7d3c6b6b6f562ad | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03209/s218432315.py | e2d669ba01785a86ae398f08408031d285e65950 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | N,X = map(int,input().split())
siz = [1]
pat = [1]
for i in range(N):
siz.append(siz[-1]*2 + 3)
pat.append(pat[-1]*2 + 1)
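# siz[i] = total layers of a level-i burger, pat[i] = patties in it
# (level 0 is a single patty; level i is bun + level-(i-1) + patty +
# level-(i-1) + bun). rec(n, x) counts the patties among the bottom x layers
# of a level-n burger: peel off the bottom bun, and once x reaches past the
# lower sub-burger, add its pat[n-1] patties plus the middle patty before
# recursing into the upper sub-burger.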
def rec(n,x):
if n==0:
ret = int(x>0)
elif x <= 1 + siz[n-1]:
ret = rec(n-1, x-1)
else:
ret = pat[n-1] + 1 + rec(n-1, x-2-siz[n-1])
return ret
print(rec(N,X)) | [
"[email protected]"
] | |
cf17ba00092630b465a26dc9a485c9062396af08 | 9c4a70475f48b81b7b0d895e07b012dd8aca2c2d | /backend/remp_28495/urls.py | 364caaa46eb9dc964112a11701b2e8b40501fda0 | [] | no_license | crowdbotics-apps/remp-28495 | 81c5963490654cf5c7a977936a62b816ff967e5f | f2430f3b7dd53d9ff43465fe12da1dd28925e773 | refs/heads/master | 2023-06-10T21:07:46.400603 | 2021-07-06T16:45:51 | 2021-07-06T16:45:51 | 383,537,825 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,187 | py | """remp_28495 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "remp"
admin.site.site_title = "remp Admin Portal"
admin.site.index_title = "remp Admin"
# swagger
api_info = openapi.Info(
title="remp API",
default_version="v1",
description="API documentation for remp App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
| [
"[email protected]"
] | |
b9ff2167f40f7d7d526d185d522bbd4d81142ef8 | ec3362fe2ef1f23a9b1fad9469b6a2ec89beda3a | /hey-brether.py | f1125635a446a6c1bfc49963cb83ca9edc32f4fd | [
"MIT"
] | permissive | tomcola512/hey-brether | dcbee74d55c5a2c6cadb188d450af0862fc9041f | 42fb0114662476ffd8f3b091950bce6cbe836047 | refs/heads/master | 2020-03-24T19:21:29.053260 | 2018-07-30T19:53:16 | 2018-07-30T19:53:16 | 142,921,495 | 0 | 0 | MIT | 2018-07-30T19:51:26 | 2018-07-30T19:51:25 | null | UTF-8 | Python | false | false | 504 | py | #!/usr/bin/env python3
import sys
from typing import List
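# Each letter is drawn with four Slack emoji tiles named :z_<letter>_0..3:;
# form_letter returns the top half (tiles 0-1) and the bottom half (tiles
# 2-3), so every word renders as two rows of emoji.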
def form_letter(letter: str) -> tuple:
parts = [f':z_{letter}_{i}:' for i in range(4)]
return ''.join(parts[:2]), ''.join(parts[2:])
def form_word(word: str) -> str:
lines = [' '.join([form_letter(s)[i] for s in word]) for i in range(2)]
return '\n'.join(lines)
def hey_brether(words: List[str]) -> str:
return '\n\n'.join([form_word(w) for w in words])
if __name__ == "__main__":
print(hey_brether(sys.argv[1:]))
| [
"[email protected]"
] | |
776e156d066891997db6fdf2b1cfc8af363bc051 | d07b91e42e32b0a0642254a460bc56a546f60a63 | /source/lambdas/sns/handler.py | c8c6345aff30656a63532726508335396e977ac4 | [
"LicenseRef-scancode-unknown-license-reference",
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | emmanuellim/improving-forecast-accuracy-with-machine-learning | 81a30674f24d8249b7a55d6cce4fabe4f8fb4fdf | 2470b13c4b23861907c326cb2c3fdb6fbf4b2397 | refs/heads/master | 2023-01-14T13:41:42.978184 | 2020-11-24T19:07:35 | 2020-11-24T19:07:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,068 | py | # #####################################################################################################################
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
# #####################################################################################################################
import json
import os
from shared.Dataset.dataset_file import DatasetFile
from shared.helpers import get_sns_client
from shared.logging import get_logger
logger = get_logger(__name__)
def topic_arn():
"""
Get the SNS topic ARN from environment variable
:return: The SNS topic ARN
"""
return os.environ["SNS_TOPIC_ARN"]
def prepare_forecast_ready_message(event: dict):
"""
Prepare a message to notify users that forecasts are ready.
    :param event: the Lambda event carrying the dataset group name
:return: message or none
"""
dataset_group = event.get("dataset_group_name")
message = f"Forecast for {dataset_group} is ready!"
return message
def build_message(event):
"""
Build a message for SNS to publish
:param event: the lambda event containing the message
:return: the message to publish
"""
message = ""
error = None
file = DatasetFile(event.get("dataset_file"), event.get("bucket"))
forecast_for = event.get("dataset_group_name", file.prefix)
if "statesError" in event.keys():
logger.info("State error message encountered")
message += f"There was an error running the forecast for {forecast_for}\n\n"
error = event.get("statesError")
if "serviceError" in event.keys():
logger.info("Service error message encountered")
message += (
f"There was a service error running the forecast for {forecast_for}\n\n"
)
error = event.get("serviceError")
if error:
error_type = error.get("Error", "Unknown")
error_cause = json.loads(error.get("Cause", "{}"))
error_message = error_cause.get("errorMessage")
stack_trace = error_cause.get("stackTrace")
message += f"Message: {error_message}\n\n"
if error_type == "DatasetsImporting":
message = f"Update for forecast {forecast_for}\n\n"
message += error_message
else:
message += f"Details: (caught {error_type})\n\n"
if stack_trace:
message += f"\n".join(stack_trace)
else:
message = prepare_forecast_ready_message(event)
return message
def sns(event, context):
"""
Send an SNS message
:param event: Lambda event
:param context: Lambda context
:return: None
"""
cli = get_sns_client()
message = build_message(event)
if message:
logger.info("Publishing message for event: %s" % event)
cli.publish(TopicArn=topic_arn(), Message=message)
else:
logger.info("No message to publish for event: %s" % event)
| [
"[email protected]"
] | |
6a6c9040cc8399e78b2e31e4a7d73c082ce17201 | 15e8a393f6c71ba77094a1718f4f89050409c7ae | /accounts/views.py | aa64a6b91daa4cef6e6f338622c3f42048dcea75 | [] | no_license | emilte/johansson | 21a3e20208c67725776af0f94de4c29150935b50 | d16bdde26e840814562f668904b2f5588c0a13ad | refs/heads/master | 2023-07-23T21:01:32.830302 | 2021-09-05T14:56:01 | 2021-09-05T14:56:01 | 390,360,563 | 0 | 0 | null | 2021-08-30T00:42:49 | 2021-07-28T13:26:31 | SCSS | UTF-8 | Python | false | false | 5,049 | py | # imports
import math
from openpyxl import Workbook
from django.views import View
from django.urls import reverse
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, FileResponse
from django.utils import timezone
from django.contrib import messages
from django.db.models import Q, Avg, Count, Min, Sum
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import get_user_model; User = get_user_model()
from django.contrib.auth import views as auth_views
from django.contrib.auth import authenticate, login, logout, update_session_auth_hash
from django.utils.decorators import method_decorator
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm, UserChangeForm, PasswordChangeForm
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import login_required, permission_required
from django.contrib.admin.views.decorators import staff_member_required, user_passes_test
from accounts import forms as account_forms
from accounts import models as account_models
# End: imports -----------------------------------------------------------------
profile_dec = [
]
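# Decorators applied to each profile view through method_decorator below;
# the list is empty for now (login_required would be the usual candidate).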
@method_decorator(profile_dec, name='dispatch')
class ProfileView(View):
template = "accounts/profile.html"
def get(self, request, *args, **kwargs):
return render(request, self.template, {
})
@method_decorator(profile_dec, name='dispatch')
class EditProfileView(View):
template = "accounts/edit_profile.html"
form_class = account_forms.EditUserForm
def get(self, request, *args, **kwargs):
form = self.form_class(instance=request.user)
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(data=request.POST, instance=request.user)
if form.is_valid():
form.save()
messages.add_message(request, messages.SUCCESS, f"Profilen din har blitt oppdatert")
return redirect('accounts:profile')
else:
return render(request, self.template, {'form': form})
class SignUpView(View):
template = "accounts/registration_form.html"
form_class = account_forms.SignUpForm
def get(self, request, *args, **kwargs):
form = self.form_class()
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
            code = form.cleaned_data.get('code')
            try:
                group = account_models.PermissionCode.objects.get(secret=code).group
                user.groups.add(group)
                messages.add_message(request, messages.SUCCESS, f"The code '{code}' added you to the department: {group.name}")
            except account_models.PermissionCode.DoesNotExist:
                messages.add_message(request, messages.INFO, f"The code '{code}' does not match any department. Please contact an admin")
return redirect('home')
else:
return render(request, self.template, {'form': form})
@method_decorator(profile_dec, name='dispatch')
class DeleteUserView(View):
def get(self, request, *args, **kwargs):
request.user.delete()
logout(request)
messages.add_message(request, messages.SUCCESS, f"Brukeren din har blitt slettet fra systemet")
return redirect('home')
# Should use the built-in AuthenticationForm instead of manual field handling
class LoginView(View):
template = "accounts/login.html"
def get(self, request, *args, **kwargs):
return render(request, self.template)
def post(self, request, *args, **kwargs):
email = request.POST['email']
password = request.POST['password']
user = authenticate(request, username=email, password=password)
error = None
if user is not None:
login(request, user)
return redirect('accounts:profile')
else:
error = "Feil"
return render(request, self.template, {'error': error})
@method_decorator(profile_dec, name='dispatch')
class LogoutView(View):
def get(self, request, *args, **kwargs):
logout(request)
return redirect('accounts:login')
@method_decorator(profile_dec, name='dispatch')
class ChangePasswordView(View):
template = "accounts/change_password.html"
form_class = account_forms.CustomPasswordChangeForm
#form_class = PasswordChangeForm
def get(self, request, *args, **kwargs):
form = self.form_class(request=request)
return render(request, self.template, {'form': form})
def post(self, request, *args, **kwargs):
form = self.form_class(data=request.POST, request=request)
if form.is_valid():
user = form.save()
update_session_auth_hash(request, user) # Important!
return redirect("accounts:profile")
return render(request, self.template, {'form': form})
| [
"[email protected]"
] | |
e3f4ebd0f7997eb84b8e98df2d8ea435590b9e7d | b049a961f100444dde14599bab06a0a4224d869b | /sdk/python/pulumi_azure_native/appplatform/v20230501preview/__init__.py | 1dd362384bee7b86480bef71d3f3221e6b6714a0 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | pulumi/pulumi-azure-native | b390c88beef8381f9a71ab2bed5571e0dd848e65 | 4c499abe17ec6696ce28477dde1157372896364e | refs/heads/master | 2023-08-30T08:19:41.564780 | 2023-08-28T19:29:04 | 2023-08-28T19:29:04 | 172,386,632 | 107 | 29 | Apache-2.0 | 2023-09-14T13:17:00 | 2019-02-24T20:30:21 | Python | UTF-8 | Python | false | false | 2,442 | py | # coding=utf-8
# *** WARNING: this file was generated by pulumi. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .api_portal import *
from .api_portal_custom_domain import *
from .apm import *
from .app import *
from .application_accelerator import *
from .application_live_view import *
from .binding import *
from .build_service_agent_pool import *
from .build_service_build import *
from .build_service_builder import *
from .buildpack_binding import *
from .certificate import *
from .config_server import *
from .configuration_service import *
from .container_registry import *
from .custom_domain import *
from .customized_accelerator import *
from .deployment import *
from .dev_tool_portal import *
from .gateway import *
from .gateway_custom_domain import *
from .gateway_route_config import *
from .get_api_portal import *
from .get_api_portal_custom_domain import *
from .get_apm import *
from .get_app import *
from .get_app_resource_upload_url import *
from .get_application_accelerator import *
from .get_application_live_view import *
from .get_binding import *
from .get_build_service_agent_pool import *
from .get_build_service_build import *
from .get_build_service_build_result_log import *
from .get_build_service_builder import *
from .get_build_service_resource_upload_url import *
from .get_buildpack_binding import *
from .get_certificate import *
from .get_config_server import *
from .get_configuration_service import *
from .get_container_registry import *
from .get_custom_domain import *
from .get_customized_accelerator import *
from .get_deployment import *
from .get_deployment_log_file_url import *
from .get_deployment_remote_debugging_config import *
from .get_dev_tool_portal import *
from .get_gateway import *
from .get_gateway_custom_domain import *
from .get_gateway_route_config import *
from .get_monitoring_setting import *
from .get_service import *
from .get_service_registry import *
from .get_storage import *
from .list_apm_secret_keys import *
from .list_build_service_builder_deployments import *
from .list_service_globally_enabled_apms import *
from .list_service_test_keys import *
from .monitoring_setting import *
from .service import *
from .service_registry import *
from .storage import *
from ._inputs import *
from . import outputs
| [
"[email protected]"
] | |
5ebd7f4f17ff38cdf95fc4df4a4fb4883473f0cf | 929886272e269e59596cf559e1c4fb26b6897e0c | /clinicstation/models.py | cc62f17a07ef387d7596027b7a816e80d1533442 | [
"Apache-2.0"
] | permissive | vedpr612/tscharts | aab287478407d64449d00c2f021611128a085c74 | 09a482622fc0f6cccc56b688aea81370ab137160 | refs/heads/master | 2020-03-09T01:23:19.170052 | 2018-04-05T02:56:36 | 2018-04-05T02:56:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,033 | py | #(C) Copyright Syd Logan 2016
#(C) Copyright Thousand Smiles Foundation 2016
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
from __future__ import unicode_literals
from django.db import models
from clinic.models import Clinic
from station.models import Station
from patient.models import Patient
'''
A clinic has stations. There are two models:
Station: simply a named location in the clinic. These records in
the database define the universe of all possible stations that a
clinic can be made up of. A station represents a class.
ClinicStation: defines an actual station for a particular clinic.
The station can be marked active or inactive. If inactive, it is
currently not seeing a patient, and the activepatient field
should be set to null (or None in Python). If active is True,
then the activepatient field should contain the ID of the patient
currently being seen. The station can "checkout" the activepatient
and that will cause the activepatient field to be set to NULL,
and the active field to be set to False.
The nextpatient field contains the ID of the next patient to be
seen by a station. When the station is not active, this patient
can be "checked in". When the patient is checked in, the station's
active field is set to True, and the activepatient field will be
assigned the nextpatient value. Then, nextpatient will be set to
the id of the patient next in the queue for this station.
away, awaytime, and willreturn are all used to indicate if the
station is currently manned (or not, perhaps the doctor is at
lunch).
'''
class ClinicStation(models.Model):
name = models.CharField(max_length=64)
station = models.ForeignKey(Station)
clinic = models.ForeignKey(Clinic)
active = models.BooleanField(default=False) # set to True if a patient is being seen
level = models.IntegerField(default=1) # relative importance to scheduler
away = models.BooleanField(default=True) # set to True when station is out to lunch
awaytime = models.IntegerField(default=30) # default minutes when station goes to away state before clinic is returned to (informational only)
    willreturn = models.DateTimeField(auto_now_add=True) # estimated time of return, computed when away is set to True, using the awaytime value
    activepatient = models.ForeignKey(Patient, null=True, related_name='nextpatient') # if active, the patient currently being seen, else null
nextpatient = models.ForeignKey(Patient, null=True, related_name="activepatient") # next patient to be seen or null
name_es = models.CharField(max_length=64)
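# A minimal sketch of the check-in / check-out transitions described above
# (hypothetical helpers, not part of the shipped API):
#
# def checkin(station):
#     station.activepatient = station.nextpatient
#     station.nextpatient = None
#     station.active = True
#     station.save()
#
# def checkout(station):
#     station.activepatient = None
#     station.active = False
#     station.save()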
| [
"[email protected]"
] | |
74093e47757ec7040332ba53788d5f88ec1a1317 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/th8.py | 709ebf6bed2ebb8351fa292a216420fb338751c9 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
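# Toy interpreter for the "tH8" language: every source line must start with
# the token tH8; a payload delimited by standalone double-quote tokens is
# echoed, and any line not starting with tH8 prints ERROR and stops.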
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'tH8':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
5953457f8263fa4530caeacb67f0636517b15216 | 7bdfc595daee3490c5efbf543d31ff6821f26f07 | /demo/pyicdartools/rrc_evaluation_funcs.py | cc302e780588a1b8d093e63f403cbdb73243a260 | [] | no_license | 40647045S/RRPN_plusplus | b3f9bef2d97db45dd43534a23ff42398a0b08049 | 9f7c18c570bd4fbcc237e55c50573baf6365dac7 | refs/heads/master | 2023-04-28T00:21:51.370297 | 2021-05-05T12:30:36 | 2021-05-05T12:30:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,186 | py | #!/usr/bin/env python2
#encoding: UTF-8
import json
import sys;sys.path.append('./')
import zipfile
import re
import sys
import os
import codecs
import importlib
"""
#from StringIO import StringIO
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
"""
def print_help():
sys.stdout.write('Usage: python %s.py -g=<gtFile> -s=<submFile> -o=<outputFolder> [-i=<gtImagesFile> -p=<jsonParams>]' %sys.argv[0])
sys.exit(2)
def load_zip_file_keys(file,fileNameRegExp=''):
"""
    Returns an array with the entries of the ZIP file that match the regular expression.
    The keys are the names of the files or the capturing group defined in fileNameRegExp
"""
try:
archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
except :
raise Exception('Error loading the ZIP archive.')
pairs = []
for name in archive.namelist():
addFile = True
keyName = name
if fileNameRegExp!="":
m = re.match(fileNameRegExp,name)
if m == None:
addFile = False
else:
if len(m.groups())>0:
keyName = m.group(1)
if addFile:
pairs.append( keyName )
return pairs
def load_zip_file(file,fileNameRegExp='',allEntries=False):
"""
Returns an array with the contents (filtered by fileNameRegExp) of a ZIP file.
    The keys are the names of the files or the capturing group defined in fileNameRegExp
allEntries validates that all entries in the ZIP file pass the fileNameRegExp
"""
try:
archive=zipfile.ZipFile(file, mode='r', allowZip64=True)
except :
raise Exception('Error loading the ZIP archive')
pairs = []
for name in archive.namelist():
addFile = True
keyName = name
if fileNameRegExp!="":
m = re.match(fileNameRegExp,name)
if m == None:
addFile = False
else:
if len(m.groups())>0:
keyName = m.group(1)
if addFile:
pairs.append( [ keyName , archive.read(name)] )
else:
if allEntries:
raise Exception('ZIP entry not valid: %s' %name)
return dict(pairs)
def decode_utf8(raw):
"""
Returns a Unicode object on success, or None on failure
"""
try:
raw = codecs.decode(raw,'utf-8', 'replace')
#extracts BOM if exists
raw = raw.encode('utf8')
if raw.startswith(codecs.BOM_UTF8):
raw = raw.replace(codecs.BOM_UTF8, '', 1)
return raw.decode('utf-8')
except:
return None
def validate_lines_in_file(fileName,file_contents,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
"""
    This function validates all lines of the file by calling the line validation function on each one
"""
utf8File = decode_utf8(file_contents)
if (utf8File is None) :
raise Exception("The file %s is not UTF-8" %fileName)
lines = utf8File.split( "\r\n" if CRLF else "\n" )
for line in lines:
line = line.replace("\r","").replace("\n","")
if(line != ""):
try:
validate_tl_line(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
except Exception as e:
raise Exception(("Line in sample not valid. Sample: %s Line: %s Error: %s" %(fileName,line,str(e))).encode('utf-8', 'replace'))
def validate_tl_line(line,LTRB=True,withTranscription=True,withConfidence=True,imWidth=0,imHeight=0):
"""
Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
"""
get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight)
def get_tl_line_values(line,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0):
"""
    Validate the format of the line. If the line is not valid an exception will be raised.
    If maxWidth and maxHeight are specified, all points must be inside the image bounds.
    Possible values are:
    LTRB=True: xmin,ymin,xmax,ymax[,confidence][,transcription]
    LTRB=False: x1,y1,x2,y2,x3,y3,x4,y4[,confidence][,transcription]
    Returns values from a text line: points, [confidence], [transcription]
"""
confidence = 0.0
transcription = "";
points = []
numPoints = 4;
if LTRB:
numPoints = 4;
if withTranscription and withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
            if m == None :
                raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence,transcription")
elif withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,confidence")
elif withTranscription:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax,transcription")
else:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*,?\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: xmin,ymin,xmax,ymax")
xmin = int(m.group(1))
ymin = int(m.group(2))
xmax = int(m.group(3))
ymax = int(m.group(4))
if(xmax<xmin):
raise Exception("Xmax value (%s) not valid (Xmax < Xmin)." %(xmax))
if(ymax<ymin):
raise Exception("Ymax value (%s) not valid (Ymax < Ymin)." %(ymax))
points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
if (imWidth>0 and imHeight>0):
validate_point_inside_bounds(xmin,ymin,imWidth,imHeight);
validate_point_inside_bounds(xmax,ymax,imWidth,imHeight);
else:
numPoints = 8;
if withTranscription and withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence,transcription")
elif withConfidence:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*([0-1].?[0-9]*)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,confidence")
elif withTranscription:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,(.*)$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4,transcription")
else:
m = re.match(r'^\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*,\s*(-?[0-9]+)\s*$',line)
if m == None :
raise Exception("Format incorrect. Should be: x1,y1,x2,y2,x3,y3,x4,y4")
points = [ float(m.group(i)) for i in range(1, (numPoints+1) ) ]
validate_clockwise_points(points)
if (imWidth>0 and imHeight>0):
validate_point_inside_bounds(points[0],points[1],imWidth,imHeight);
validate_point_inside_bounds(points[2],points[3],imWidth,imHeight);
validate_point_inside_bounds(points[4],points[5],imWidth,imHeight);
validate_point_inside_bounds(points[6],points[7],imWidth,imHeight);
if withConfidence:
try:
confidence = float(m.group(numPoints+1))
except ValueError:
raise Exception("Confidence value must be a float")
if withTranscription:
posTranscription = numPoints + (2 if withConfidence else 1)
transcription = m.group(posTranscription)
m2 = re.match(r'^\s*\"(.*)\"\s*$',transcription)
if m2 != None : #Transcription with double quotes, we extract the value and replace escaped characters
transcription = m2.group(1).replace("\\\\", "\\").replace("\\\"", "\"")
return points,confidence,transcription
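# Example lines accepted by get_tl_line_values (illustrative values only):
#   LTRB=True, with confidence and transcription: "10,20,110,220,0.87,hello"
#   LTRB=False, with transcription only: "10,20,110,20,110,220,10,220,world"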
def validate_point_inside_bounds(x,y,imWidth,imHeight):
    if(x<0 or x>imWidth):
        raise Exception("X value (%s) not valid. Image dimensions: (%s,%s)" %(x,imWidth,imHeight))
    if(y<0 or y>imHeight):
        raise Exception("Y value (%s) not valid. Image dimensions: (%s,%s)" %(y,imWidth,imHeight))
def validate_clockwise_points(points):
"""
    Validates that the 4 points that delimit a polygon are given in clockwise order.
"""
if len(points) != 8:
raise Exception("Points list not valid." + str(len(points)))
point = [
[int(points[0]) , int(points[1])],
[int(points[2]) , int(points[3])],
[int(points[4]) , int(points[5])],
[int(points[6]) , int(points[7])]
]
edge = [
( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),
( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),
( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),
( point[0][0] - point[3][0])*( point[0][1] + point[3][1])
]
summatory = edge[0] + edge[1] + edge[2] + edge[3];
if summatory>0:
raise Exception("Points are not clockwise. The coordinates of bounding quadrilaterals have to be given in clockwise order. Regarding the correct interpretation of 'clockwise' remember that the image coordinate system used is the standard one, with the image origin at the upper left, the X axis extending to the right and Y axis extending downwards.")
def get_tl_line_values_from_file_contents(content,CRLF=True,LTRB=True,withTranscription=False,withConfidence=False,imWidth=0,imHeight=0,sort_by_confidences=True):
"""
    Returns all points, confidences and transcriptions of a file in lists. Valid line formats:
xmin,ymin,xmax,ymax,[confidence],[transcription]
x1,y1,x2,y2,x3,y3,x4,y4,[confidence],[transcription]
"""
pointsList = []
transcriptionsList = []
confidencesList = []
lines = content.split( "\r\n" if CRLF else "\n" )
for line in lines:
line = line.replace("\r","").replace("\n","")
if(line != "") :
points, confidence, transcription = get_tl_line_values(line,LTRB,withTranscription,withConfidence,imWidth,imHeight);
pointsList.append(points)
transcriptionsList.append(transcription)
confidencesList.append(confidence)
if withConfidence and len(confidencesList)>0 and sort_by_confidences:
confidencesList, pointsList,transcriptionsList = (list(t) for t in zip(*sorted(zip(confidencesList, pointsList, transcriptionsList), reverse=True)))
return pointsList,confidencesList,transcriptionsList
def main_evaluation(p,default_evaluation_params_fn,validate_data_fn,evaluate_method_fn,show_result=True,per_sample=True):
"""
    This process validates a method, evaluates it and, if it succeeds, generates a ZIP file with a JSON entry for each sample.
    Params:
    p: Dictionary of parameters with the GT/submission locations. If None is passed, the parameters sent by the system are used.
    default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
    evaluate_method_fn: points to a function that evaluates the submission and returns a dictionary with the results
"""
    # Build the parameter dict from the command line when none was passed in.
    if p == None:
        if len(sys.argv) < 2:
            print_help()
        p = dict([s[1:].split('=') for s in sys.argv[1:]])
    # resolve the ground-truth / submission / output locations
    gt_path = p['g']
    submit_path = p['s']
    output = p['o']
evalParams = default_evaluation_params_fn()
if 'p' in p.keys():
evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )
resDict={'calculated':True,'Message':'','method':'{}','per_sample':'{}'}
try:
validate_data_fn(p['g'], p['s'], evalParams)
evalData = evaluate_method_fn(p['g'], p['s'], evalParams)
# print("evalData:", evalData)
resDict.update(evalData)
except Exception as e:
resDict['Message']= str(e)
resDict['calculated']=False
if not os.path.exists(p['o']):
os.makedirs(p['o'])
resultsOutputname = p['o'] + '/results.zip'
outZip = zipfile.ZipFile(resultsOutputname, mode='w', allowZip64=True)
# del resDict['per_sample']
if 'output_items' in resDict.keys():
del resDict['output_items']
outZip.writestr('method.json',json.dumps(resDict))
if not resDict['calculated']:
if show_result:
sys.stderr.write('Error!\n'+ resDict['Message']+'\n\n')
outZip.close()
return resDict
if per_sample == True:
for k,v in evalData['per_sample'].items():
outZip.writestr( k + '.json',json.dumps(v))
if 'output_items' in evalData.keys():
        for k, v in evalData['output_items'].items():
outZip.writestr( k,v)
outZip.close()
if show_result:
sys.stdout.write("Calculated!")
sys.stdout.write(json.dumps(resDict['method']))
return resDict
def main_validation(default_evaluation_params_fn,validate_data_fn):
"""
This process validates a method
Params:
default_evaluation_params_fn: points to a function that returns a dictionary with the default parameters used for the evaluation
    validate_data_fn: points to a method that validates the correct format of the submission
"""
try:
p = dict([s[1:].split('=') for s in sys.argv[1:]])
evalParams = default_evaluation_params_fn()
if 'p' in p.keys():
evalParams.update( p['p'] if isinstance(p['p'], dict) else json.loads(p['p'][1:-1]) )
validate_data_fn(p['g'], p['s'], evalParams)
print('SUCCESS')
sys.exit(0)
except Exception as e:
print(str(e))
sys.exit(101) | [
"[email protected]"
] | |
b8068d18d1dfcb398cb0e4564f4460bd7017fa22 | 46667df8344db58698838d677bdae377b3c3c53c | /Data Manipulation with Pandas/Part 2/24.downsampling-data.py | 066a85b0f5cfb6d72099f5c86d7950dd93723f2b | [] | no_license | bennysetiawan/DQLab-Career-2021 | 278577cdddb3852c57f799cd1207b4ff45962960 | 0822d15e3b24cf0146c23456d4b65b0fb00a53fc | refs/heads/master | 2023-06-06T13:24:21.289929 | 2021-06-23T17:09:14 | 2021-06-23T17:09:14 | 379,657,598 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import pandas as pd
# Load dataset https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv
gaq = pd.read_csv('https://dqlab-dataset.s3-ap-southeast-1.amazonaws.com/LO4/global_air_quality_4000rows.csv')
gaq['timestamp'] = pd.to_datetime(gaq['timestamp'])
gaq = gaq.set_index('timestamp')
print('Dataset before downsampling (first 5 rows):\n', gaq.head())
# [1] Downsample from daily to weekly and take the maximum for each week
gaq_weekly = gaq.resample('W').max()
print('Downsampling daily to weekly - max (first 5 rows):\n', gaq_weekly.head())
# [2] Downsample from daily to quarterly and take the minimum for each quarter
gaq_quarterly = gaq.resample('Q').min()
print('Downsampling daily to quarterly - min (first 5 rows):\n', gaq_quarterly.head()) | [
"[email protected]"
] | |
aae8ed71564aa67c6ec5384655345127b605987a | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/CrmGrademktMemberDetailCreateRequest.py | 46b878b46708a1903474d989185d6c8580933eda | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 365 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class CrmGrademktMemberDetailCreateRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.feather = None
self.parameter = None
def getapiname(self):
return 'taobao.crm.grademkt.member.detail.create'
| [
"[email protected]"
] | |
6e7ab0d6a2cb4966bcce0938269d82f81bbe5888 | 134267f2244954d48c65daae0b58051aba757fed | /lucky.py | 6d7541f6ac39af4ddc31c088dfa1988c61387f8c | [] | no_license | mobin-zaman/misc_python | 47fe836d1eae154210912b8b353f241303523e6b | 7a22329ae38b2d5ee9cd9ce29d995686759f5f87 | refs/heads/master | 2020-04-28T00:48:06.774434 | 2019-07-24T15:28:15 | 2019-07-24T15:28:15 | 174,829,343 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 494 | py | #! python3
#lucky.py - opens several google search results
import requests, sys, webbrowser, bs4
print('Googling....')
res=requests.get('http://google.com/search?q=' + ' '.join(sys.argv[1:]))
res.raise_for_status()
#TODO: Retrieve top search result links.
soup = bs4.BeautifulSoup(res.text, 'html.parser')
#TODO: Open a browser tab for each result
linkElems = soup.select('.r a')
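# NOTE: the '.r a' selector matches Google's result markup at the time of
# writing and will silently yield no links if Google changes its HTML.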
numOpen=min(5,len(linkElems))
for i in range(numOpen):
webbrowser.open('http://google.com'+linkElems[i].get('href'))
| [
"[email protected]"
] | |
54e679afc65bea0590837f01b49bdf2be09aece1 | 58cd392c642ac9408349f03dc72927db6abcce55 | /team2/src/Without_Doubt_Project/venv/lib/python3.6/site-packages/tbears/libs/icx_signer.py | 8e3c035124d75dddc7b9979edb461cd6fd3fbba1 | [] | no_license | icon-hackathons/201902-dapp-competition-bu | 161226eb792425078351c790b8795a0fe5550735 | f3898d31a20f0a85637f150d6187285514528d53 | refs/heads/master | 2020-04-24T07:48:18.891646 | 2019-04-18T01:47:21 | 2019-04-18T01:47:21 | 171,809,810 | 3 | 11 | null | 2019-04-18T01:47:23 | 2019-02-21T06:01:04 | Python | UTF-8 | Python | false | false | 2,922 | py | # -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
from eth_keyfile import extract_key_from_keyfile
from secp256k1 import PrivateKey
from tbears.tbears_exception import KeyStoreException
def key_from_key_store(file_path: str, password: (bytes, str)) -> bytes:
"""Get private key from keystore file.
:param file_path: keystore file path.
:param password: password of keystore file.
:return: private key
"""
try:
with open(file_path, 'rb') as file:
private_key = extract_key_from_keyfile(file, password)
except ValueError:
raise KeyStoreException('Invalid password.')
except Exception as e:
raise KeyStoreException(f'keystore file error.{e}')
else:
return private_key
class IcxSigner:
"""Class for creating a recoverable ECDSA signature using a private key."""
def __init__(self, private_key: bytes):
self._private_key = private_key
self._private_key_object = PrivateKey(self._private_key)
def sign_recoverable(self, msg_hash):
"""Make a recoverable signature using message hash data.
We can extract public key from recoverable signature.
:param msg_hash: Hash data of message. type(bytes)
:return:
type(tuple)
        type(bytes): 64-byte compact signature data, type(int): recovery id
"""
private_key_object = self._private_key_object
recoverable_signature = private_key_object.ecdsa_sign_recoverable(msg_hash, raw=True)
return private_key_object.ecdsa_recoverable_serialize(recoverable_signature)
def sign(self, msg_hash) -> bytes:
"""Make base64-encoded string of recoverable signature data.
:param msg_hash: Hash data of message. type(bytes)
:return: base64-encoded string of recoverable signature data
"""
# 'msg_hash' argument must be 256 bits (made by hashlib.sha256() method)
signature, recovery_id = self.sign_recoverable(msg_hash)
recoverable_sig = bytes(bytearray(signature) + recovery_id.to_bytes(1, 'big'))
return base64.b64encode(recoverable_sig)
@property
def public_key(self) -> bytes:
return self._private_key_object.pubkey.serialize(compressed=False)
@property
def address(self) -> bytes:
return hashlib.sha3_256(self.public_key[1:]).digest()[-20:]
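# Minimal usage sketch (hypothetical keystore path and password):
#   private_key = key_from_key_store('./keystore', 'p@ssw0rd')
#   signer = IcxSigner(private_key)
#   signature = signer.sign(hashlib.sha3_256(b'serialized-tx').digest())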
| [
"[email protected]"
] | |
56d1b915aa3f5e63035a47824c207613f2bf5480 | 3b84c4b7b16ccfd0154f8dcb75ddbbb6636373be | /google-cloud-sdk/lib/googlecloudsdk/third_party/apis/gameservices/v1beta/resources.py | 101446b0908fca46ce29693f795216e4eeaf8876 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | twistedpair/google-cloud-sdk | 37f04872cf1ab9c9ce5ec692d2201a93679827e3 | 1f9b424c40a87b46656fc9f5e2e9c81895c7e614 | refs/heads/master | 2023-08-18T18:42:59.622485 | 2023-08-15T00:00:00 | 2023-08-15T12:14:05 | 116,506,777 | 58 | 24 | null | 2022-02-14T22:01:53 | 2018-01-06T18:40:35 | Python | UTF-8 | Python | false | false | 2,945 | py | # -*- coding: utf-8 -*- #
# Copyright 2015 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Resource definitions for cloud platform apis."""
import enum
BASE_URL = 'https://gameservices.googleapis.com/v1beta/'
DOCS_URL = 'https://cloud.google.com/solutions/gaming/'
class Collections(enum.Enum):
"""Collections for all supported apis."""
PROJECTS = (
'projects',
'projects/{projectsId}',
{},
['projectsId'],
True
)
PROJECTS_LOCATIONS = (
'projects.locations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_GAMESERVERDEPLOYMENTS = (
'projects.locations.gameServerDeployments',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/'
'gameServerDeployments/{gameServerDeploymentsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_GAMESERVERDEPLOYMENTS_CONFIGS = (
'projects.locations.gameServerDeployments.configs',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/'
'gameServerDeployments/{gameServerDeploymentsId}/configs/'
'{configsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_OPERATIONS = (
'projects.locations.operations',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/operations/'
'{operationsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REALMS = (
'projects.locations.realms',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/realms/'
'{realmsId}',
},
['name'],
True
)
PROJECTS_LOCATIONS_REALMS_GAMESERVERCLUSTERS = (
'projects.locations.realms.gameServerClusters',
'{+name}',
{
'':
'projects/{projectsId}/locations/{locationsId}/realms/'
'{realmsId}/gameServerClusters/{gameServerClustersId}',
},
['name'],
True
)
def __init__(self, collection_name, path, flat_paths, params,
enable_uri_parsing):
self.collection_name = collection_name
self.path = path
self.flat_paths = flat_paths
self.params = params
self.enable_uri_parsing = enable_uri_parsing
| [
"[email protected]"
] | |
dfeb592665b2b0016aab64fdaa9c63d96cf44147 | 33f752443cbb38d3cb4d9d40982b2a2d824acb81 | /demo/django/api/migrations/0001_initial.py | 6227ca3a23ca0dd634450cc0af400b99742f755b | [
"MIT"
] | permissive | denisroldan/django-angular-dynamic-forms | b03b4f20751c609733356bea1a7141da29f9de54 | f50de1c74db727e565756f40344c29bbab1b3910 | refs/heads/master | 2020-04-03T10:11:41.776970 | 2019-08-20T09:28:30 | 2019-08-20T09:28:30 | 155,186,503 | 0 | 0 | null | 2018-10-29T09:38:19 | 2018-10-29T09:38:19 | null | UTF-8 | Python | false | false | 1,193 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-24 10:43
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('street', models.CharField(blank=True, max_length=100, null=True)),
('number', models.CharField(blank=True, max_length=100, null=True)),
],
),
migrations.CreateModel(
name='City',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
],
),
migrations.AddField(
model_name='address',
name='city',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='api.City'),
),
]
| [
"[email protected]"
] | |
fa45b11c2a495772e67bb46f3588e19d43441dc4 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/158/usersdata/264/68784/submittedfiles/imc.py | f8dd970b68f227049dd18bb352f6bc264eb43c0c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 205 | py | # -*- coding: utf-8 -*-
# Input:
peso = float(input('Enter the weight in kg: '))
altura = float(input('Enter the height in meters: '))
# Processing:
imc = peso / (altura ** 2)
# Output -- the category thresholds below assume the standard WHO BMI ranges:
if imc < 18.5:
    print('Underweight')
elif imc < 25:
    print('Normal weight')
elif imc < 30:
    print('Overweight')
else:
    print('Obese')
| [
"[email protected]"
] | |
81b7ab4f7250f1069808eec54a918272419cfc76 | cac9c211a4eeb55cfd61d8e5c54a1d4082c4de33 | /Experimental/WindowsPyDbg/pydbg.py | a95e030b2d8bbea4226e47fcb94415efda160ae7 | [
"BSD-3-Clause"
] | permissive | vchateauneu/survol | 8c8b5db67f81c6400c3e2f4b84b57fb83d69fb1f | 2b5be9d28115f8f9b1dd91bf05449c92bf9a9926 | refs/heads/master | 2020-03-21T09:11:37.765314 | 2018-07-03T20:40:16 | 2018-07-03T20:40:16 | 138,387,051 | 1 | 0 | null | 2018-06-23T09:05:45 | 2018-06-23T09:05:45 | null | UTF-8 | Python | false | false | 138,482 | py | #!c:\python\python.exe
#
# PyDBG
# Copyright (C) 2006 Pedram Amini <[email protected]>
#
# $Id: pydbg.py 253 2011-01-24 19:13:57Z my.name.is.sober $
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation;
# either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
"""
@author: Pedram Amini
@license: GNU General Public License 2.0 or later
@contact: [email protected]
@organization: www.openrce.org
"""
import os.path
import sys
import copy
import signal
import struct
import pydasm
import socket
from ctypes import (
windll, CDLL, byref, sizeof, c_char_p, c_int,
c_ulong, create_string_buffer,
)
# Mac OS compatibility
try:
kernel32 = windll.kernel32
advapi32 = windll.advapi32
ntdll = windll.ntdll
iphlpapi = windll.iphlpapi
except:
kernel32 = CDLL(os.path.join(os.path.dirname(__file__), 'libmacdll.dylib'))
advapi32 = kernel32
from pydbg.breakpoints import Breakpoint, MemBreakpoint, HwBreakpoint
from pydbg.mem_snapshot import MemSnapshotBlock, MemSnapshotContext
from pydbg.systemdll import SystemDLL
from pydbg.errors import PDError
from pydbg.windows_h import (
SYSTEM_INFO, LDT_ENTRY, PROCESS_INFORMATION, MEMORY_BASIC_INFORMATION,
DWORD, HANDLE, LUID, TOKEN_PRIVILEGES, CONTEXT, STARTUPINFO,
)
from pydbg.defines import (
PAGE_GUARD, HW_ACCESS, HW_EXECUTE, HW_WRITE,
DBG_CONTINUE, DEBUG_EVENT, DBG_EXCEPTION_NOT_HANDLED,
CREATE_PROCESS_DEBUG_EVENT, CREATE_THREAD_DEBUG_EVENT,
EXIT_THREAD_DEBUG_EVENT, EXIT_PROCESS_DEBUG_EVENT,
LOAD_DLL_DEBUG_EVENT, UNLOAD_DLL_DEBUG_EVENT, EXCEPTION_DEBUG_EVENT,
EXCEPTION_ACCESS_VIOLATION, EXCEPTION_BREAKPOINT,
EXCEPTION_SINGLE_STEP, EXCEPTION_GUARD_PAGE, USER_CALLBACK_DEBUG_EVENT,
TH32CS_SNAPPROCESS, TH32CS_SNAPTHREAD, TH32CS_SNAPMODULE,
MODULEENTRY32, PROCESSENTRY32, THREADENTRY32, INVALID_HANDLE_VALUE,
EFLAGS_TRAP, TOKEN_ADJUST_PRIVILEGES, SE_PRIVILEGE_ENABLED,
CONTEXT_FULL, CONTEXT_DEBUG_REGISTERS, DEBUG_ONLY_THIS_PROCESS,
DEBUG_PROCESS, CREATE_NEW_CONSOLE, THREAD_ALL_ACCESS,
PROCESS_ALL_ACCESS, MIB_TCP_STATE_LISTEN, MIB_TCPTABLE_OWNER_PID,
MIB_UDPTABLE_OWNER_PID, AF_INET, UDP_TABLE_OWNER_PID,
TCP_TABLE_OWNER_PID_ALL, PAGE_READONLY, PAGE_EXECUTE, PAGE_NOACCESS,
PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_EXECUTE_WRITECOPY,
PAGE_READWRITE, MEM_COMMIT, MEM_IMAGE, SYSDBG_MSR,
SysDbgReadMsr, SysDbgWriteMsr, FORMAT_MESSAGE_ALLOCATE_BUFFER,
FORMAT_MESSAGE_FROM_SYSTEM,
)
class PyDBG(object):
"""
This class implements standard low level functionality including:
- The load() / attach() routines.
- The main debug event loop.
- Convenience wrappers for commonly used Windows API.
- Single step toggling routine.
- Win32 error handler wrapped around PDError.
- Base exception / event handler routines which are meant to be overridden.
Higher level functionality is also implemented including:
- Register manipulation.
- Soft (INT 3) breakpoints.
- Memory breakpoints (page permissions).
- Hardware breakpoints.
- Exception / event handling call backs.
- Pydasm (libdasm) disassembly wrapper.
- Process memory snapshotting and restoring.
- Endian manipulation routines.
- Debugger hiding.
- Function resolution.
- "Intelligent" memory dereference.
- Stack/SEH unwinding.
- Etc...
"""
STRING_EXPLORATON_BUF_SIZE = 256
STRING_EXPLORATION_MIN_LENGTH = 2
HW_SLOTS = {0, 1, 2, 3}
def __init__(self, ff=True, cs=False):
"""
Set the default attributes.
See the source if you want to modify the default creation values.
@type ff: Boolean
@param ff: (Optional, Def=True) Flag controlling whether or not pydbg attaches to forked processes
@type cs: Boolean
@param cs: (Optional, Def=False) Flag controlling whether or not pydbg is in client/server (socket) mode
"""
# private variables, internal use only:
self._restore_breakpoint = None # breakpoint to restore
self._guarded_pages = set() # specific pages we set PAGE_GUARD on
self._guards_active = True # flag specifying whether or not guard pages are active
self.page_size = 0 # memory page size (dynamically resolved at run-time)
self.pid = 0 # debuggee's process id
self.h_process = None # debuggee's process handle
self.h_thread = None # handle to current debuggee thread
self.debugger_active = True # flag controlling the main debugger event handling loop
self.follow_forks = ff # flag controlling whether or not pydbg attaches to forked processes
self.client_server = cs # flag controlling whether or not pydbg is in client/server mode
self.callbacks = {} # exception callback handler dictionary
self.system_dlls = [] # list of loaded system dlls
self.dirty = False # flag specifying that the memory space of the debuggee was modified
self.system_break = None # the address at which initial and forced breakpoints occur at
self.peb = None # process environment block address
self.tebs = {} # dictionary of thread IDs to thread environment block addresses
# internal variables specific to the last triggered exception.
self.context = None # thread context of offending thread
self.dbg = None # DEBUG_EVENT
self.exception_address = None # from dbg.u.Exception.ExceptionRecord.ExceptionAddress
self.write_violation = None # from dbg.u.Exception.ExceptionRecord.ExceptionInformation[0]
self.violation_address = None # from dbg.u.Exception.ExceptionRecord.ExceptionInformation[1]
self.exception_code = None # from dbg.u.Exception.ExceptionRecord.ExceptionCode
self.breakpoints = {} # internal breakpoint dictionary, keyed by address
self.memory_breakpoints = {} # internal memory breakpoint dictionary, keyed by base address
self.hardware_breakpoints = {} # internal hardware breakpoint array, indexed by slot (0-3 inclusive)
self.memory_snapshot_blocks = [] # list of memory blocks at time of memory snapshot
self.memory_snapshot_contexts = [] # list of threads contexts at time of memory snapshot
self.first_breakpoint = True # this flag gets disabled once the windows initial break is handled
# Address of hit memory breakpoint or zero on miss
# designates whether or not the violation was in reaction to
# a memory breakpoint hit or other unrelated event.
self.memory_breakpoint_hit = 0
# Hardware breakpoint on hit or None on miss
# designates whether or not the single step event was in reaction to
# a hardware breakpoint hit or other unrelated event.
self.hardware_breakpoint_hit = None
self.instruction = None # pydasm instruction object, propagated by self.disasm()
self.mnemonic = None # pydasm decoded instruction mnemonic, propagated by self.disasm()
self.op1 = None # pydasm decoded 1st operand, propagated by self.disasm()
self.op2 = None # pydasm decoded 2nd operand, propagated by self.disasm()
self.op3 = None # pydasm decoded 3rd operand, propagated by self.disasm()
# control debug/error logging.
self._log = lambda msg: sys.stderr.write('[PDBG_LOG] {}\n'.format(msg))
self._err = lambda msg: sys.stderr.write('[PDBG_ERR] {}\n'.format(msg))
# determine the system page size.
system_info = SYSTEM_INFO()
kernel32.GetSystemInfo(byref(system_info))
self.page_size = system_info.dwPageSize
# Determine the system DbgBreakPoint address.
# This is the address at which initial and forced breaks happen.
# XXX - need to look into fixing this for pydbg client/server.
self.system_break = self.func_resolve('ntdll.dll', 'DbgBreakPoint')
self._log('system page size is {}'.format(self.page_size))
def addr_to_dll(self, address):
"""
Return the system DLL that contains the address specified.
@type address: DWORD
@param address: Address to search system DLL ranges for
@rtype: SystemDLL
@return: System DLL that contains the address specified or None if not found.
"""
for dll in self.system_dlls:
if dll.base < address < (dll.base + dll.size):
return dll
return None
def addr_to_module(self, address):
"""
Return the MODULEENTRY32 structure for the module that contains the address specified.
@type address: DWORD
@param address: Address to search loaded module ranges for
@rtype: MODULEENTRY32
        @return: MODULEENTRY32 structure that contains the address specified or None if not found.
"""
found = None
for module in self.iterate_modules():
if module.modBaseAddr < address < module.modBaseAddr + module.modBaseSize:
# we have to make a copy of the 'module' since it is an iterator and will be blown away.
# the reason we can't "break" out of the loop is because there will be a handle leak.
# and we can't use enumerate_modules() because we need the entire module structure.
# so there...
found = copy.copy(module)
return found
def attach(self, pid):
"""Attach to the specified process by PID
Saves a process handle in self.h_process and prevents debuggee from
exiting on debugger quit.
@type pid: Integer
@param pid: Process ID to attach to
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
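        Example (a minimal sketch; the PID is hypothetical)::
            dbg = PyDBG()
            dbg.attach(1234)
            dbg.run()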
"""
self._log('attaching to pid {}'.format(pid))
# obtain necessary debug privileges.
self.get_debug_privileges()
self.pid = pid
self.open_process(pid)
self.debug_active_process(pid)
# allow detaching on systems that support it.
try:
self.debug_set_process_kill_on_exit(False)
        except PDError as err:
            self._err(err)
# enumerate the TEBs and add them to the internal dictionary.
for thread_id in self.enumerate_threads():
thread_handle = self.open_thread(thread_id)
thread_context = self.get_thread_context(thread_handle)
selector_entry = LDT_ENTRY()
success = kernel32.GetThreadSelectorEntry(thread_handle,
thread_context.SegFs,
byref(selector_entry))
if not success:
self.win32_error('GetThreadSelectorEntry()')
self.close_handle(thread_handle)
teb = selector_entry.BaseLow
teb += ((selector_entry.HighWord.Bits.BaseMid << 16)
+ (selector_entry.HighWord.Bits.BaseHi << 24))
# add this TEB to the internal dictionary.
self.tebs[thread_id] = teb
# if the PEB has not been set yet, do so now.
if not self.peb:
self.peb = self.read_process_memory(teb + 0x30, 4)
self.peb = struct.unpack('<L', self.peb)[0]
return self.ret_self()
def bp_del(self, address):
"""
Removes the breakpoint from target address.
@see: bp_set(), bp_del_all(), bp_is_ours()
@type address: DWORD or List
@param address: Address or list of addresses to remove breakpoint from
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
# if a list of addresses to remove breakpoints from was supplied.
if isinstance(address, list):
# pass each lone address to ourself.
for addr in address:
self.bp_del(addr)
return self.ret_self()
self._log('bp_del(0x{:08x})'.format(address))
# ensure a breakpoint exists at the target address.
if address in self.breakpoints:
# restore the original byte.
self.write_process_memory(address, self.breakpoints[address].original_byte)
self.set_attr('dirty', True)
# remove the breakpoint from the internal list.
del self.breakpoints[address]
return self.ret_self()
def bp_del_all(self):
"""
Removes all breakpoints from the debuggee.
@see: bp_set(), bp_del(), bp_is_ours()
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('bp_del_all()')
for bp in list(self.breakpoints):
self.bp_del(bp)
return self.ret_self()
def bp_del_hw(self, address=None, slot=None):
"""
Removes the hardware breakpoint from the specified address or slot.
Either an address or a slot must be specified, but not both.
@see: bp_set_hw(), bp_del_hw_all()
@type address: DWORD
@param address: (Optional) Address to remove hardware breakpoint from.
@type slot: Integer (0 through 3)
        @param slot: (Optional) Slot (0 through 3) of the hardware breakpoint to remove
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
if address is None and slot is None:
raise PDError('hw bp address or slot # must be specified.')
if not address and slot not in self.HW_SLOTS:
raise PDError('invalid hw bp slot: {}. valid range is 0 through 3'.format(slot))
# de-activate the hardware breakpoint for all active threads.
for thread_id in self.enumerate_threads():
context = self.get_thread_context(thread_id=thread_id)
if address:
if context.Dr0 == address:
slot = 0
elif context.Dr1 == address:
slot = 1
elif context.Dr2 == address:
slot = 2
elif context.Dr3 == address:
slot = 3
# mark slot as inactive.
# bits 0, 2, 4, 6 for local (L0 - L3)
            # bits 1, 3, 5, 7 for global (G0 - G3)
context.Dr7 &= ~(1 << (slot * 2))
# remove address from the specified slot.
if slot == 0:
context.Dr0 = 0x00000000
elif slot == 1:
context.Dr1 = 0x00000000
elif slot == 2:
context.Dr2 = 0x00000000
elif slot == 3:
context.Dr3 = 0x00000000
# Remove the condition (RW0 - RW3) field
            # from the appropriate slot (bits 16/17, 20/21, 24/25, 28/29)
context.Dr7 &= ~(3 << ((slot * 4) + 16))
# Remove the length (LEN0-LEN3) field
# from the appropriate slot (bits 18/19, 22/23, 26/27, 30/31)
context.Dr7 &= ~(3 << ((slot * 4) + 18))
# set the thread context.
self.set_thread_context(context, thread_id=thread_id)
# Remove the breakpoint from the internal list.
del self.hardware_breakpoints[slot]
return self.ret_self()
def bp_del_hw_all(self):
"""
Removes all hardware breakpoints from the debuggee.
@see: bp_set_hw(), bp_del_hw()
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
# Don't use:
# for slot in self.hardware_breakpoints:
# self.bp_del_hw(slot=slot)
# Because self.bp_del_hw() changes self.hardware_breakpoints dict
for slot in list(self.hardware_breakpoints):
self.bp_del_hw(slot=slot)
return self.ret_self()
def bp_del_mem(self, address):
"""
Removes the memory breakpoint from target address.
@see: bp_del_mem_all(), bp_set_mem(), bp_is_ours_mem()
@type address: DWORD
@param address: Address or list of addresses to remove memory breakpoint from
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('bp_del_mem(0x{:08x})'.format(address))
# ensure a memory breakpoint exists at the target address.
if address not in self.memory_breakpoints:
return self.ret_self()
size = self.memory_breakpoints[address].size
mbi = self.memory_breakpoints[address].mbi
# remove the memory breakpoint from our internal list.
del self.memory_breakpoints[address]
# page-aligned target memory range.
start = mbi.BaseAddress
end = address + size # non page-aligned range end
end = end + self.page_size - (end % self.page_size) # page-aligned range end
# for each page in the target range, restore
# the original page permissions if no other breakpoint exists.
for page in range(start, end, self.page_size):
other_bp_found = False
for mem_bp in self.memory_breakpoints.values():
if page <= mem_bp.address < page + self.page_size:
other_bp_found = True
break
                if page <= mem_bp.address + mem_bp.size < page + self.page_size:
other_bp_found = True
break
if not other_bp_found:
try:
self.virtual_protect(page, 1, mbi.Protect & ~PAGE_GUARD)
# remove the page from the set of tracked GUARD pages.
                    self._guarded_pages.remove(page)
except:
pass
return self.ret_self()
def bp_del_mem_all(self):
"""
Removes all memory breakpoints from the debuggee.
@see: bp_del_mem(), bp_set_mem(), bp_is_ours_mem()
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('bp_del_mem_all()')
for address in list(self.memory_breakpoints):
self.bp_del_mem(address)
return self.ret_self()
def bp_is_ours(self, address_to_check):
"""
Determine if a breakpoint address belongs to us.
@see: bp_set(), bp_del(), bp_del_all()
@type address_to_check: DWORD
@param address_to_check: Address to check if we have set a breakpoint at
@rtype: Bool
@return: True if breakpoint in question is ours, False otherwise
"""
return address_to_check in self.breakpoints
def bp_is_ours_mem(self, address_to_check):
"""
Determines if the specified address falls within the range of one of
our memory breakpoints.
When handling potential memory breakpoint exceptions it is mandatory
to check the offending address with this routine as memory breakpoints
are implemented by changing page permissions and the referenced address
may very well exist within the same page as a memory breakpoint
but not within the actual range of the buffer we wish to break on.
@see: bp_set_mem(), bp_del_mem(), bp_del_mem_all()
@type address_to_check: DWORD
@param address_to_check: Address to check if we have set a breakpoint on
@rtype: Mixed
@return: The starting address of the buffer our breakpoint triggered on
or False if address falls outside range.
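        Example (a minimal sketch of a guard page callback using this check)::
            def gp_handler(dbg):
                hit = dbg.bp_is_ours_mem(dbg.violation_address)
                if hit:
                    print('buffer at 0x{:08x} was touched'.format(hit))
                return DBG_CONTINUE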
"""
for address in self.memory_breakpoints:
size = self.memory_breakpoints[address].size
if address <= address_to_check <= address + size:
return address
return False
def bp_set(self, address, description="", restore=True, handler=None):
"""Sets a breakpoint at the designated address.
Register an EXCEPTION_BREAKPOINT callback handler to catch
breakpoint events. If a list of addresses is submitted to this routine
then the entire list of new breakpoints get the same description and
restore.
The optional "handler" parameter can be used to identify a function to
specifically handle the specified bp, as opposed to the generic
bp callback handler. The prototype of the callback routines is::
func (pydbg)
return DBG_CONTINUE # or other continue status
@see: bp_is_ours(), bp_del(), bp_del_all()
@type address: DWORD or List
@param address: Address or list of addresses to set breakpoint at
@type description: String
@param description: (Optional) Description to associate with this breakpoint
@type restore: Bool
@param restore: (Optional, def=True) Flag controlling whether or not to restore the breakpoint
@type handler: Function Pointer
@param handler: (Optional, def=None) Optional handler to call for this bp instead of the default handler
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
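        Example (a minimal sketch; the address and handler are hypothetical)::
            def entry_handler(dbg):
                print('hit breakpoint at 0x{:08x}'.format(dbg.exception_address))
                return DBG_CONTINUE
            pydbg.bp_set(0x00401000, 'entry point', handler=entry_handler)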
"""
# If a list of addresses to set breakpoints on from was supplied
if isinstance(address, list):
# Pass each lone address to ourselves
# (each one gets the same description / restore flag).
for addr in address:
self.bp_set(addr, description, restore, handler)
return self.ret_self()
self._log('bp_set(0x{:08x})'.format(address))
# Ensure a breakpoint doesn't already exist at the target address.
if address not in self.breakpoints:
try:
# save the original byte at the requested breakpoint address.
original_byte = self.read_process_memory(address, 1)
# write an int3 into the target process space.
                self.write_process_memory(address, b'\xCC')
self.set_attr('dirty', True)
# add the breakpoint to the internal list.
self.breakpoints[address] = Breakpoint(address, original_byte,
description, restore, handler)
except:
raise PDError('Failed setting breakpoint at 0x{:08x}'.format(address))
return self.ret_self()
def bp_set_hw(self, address, length, condition,
description="", restore=True, handler=None):
"""Sets a hardware breakpoint at the designated address.
Register an EXCEPTION_SINGLE_STEP callback handler to catch
hardware breakpoint events. Setting hardware breakpoints requires
the internal h_thread handle be set. This means that you can not set
one outside the context of an debug event handler.
If you want to set a hardware breakpoint as soon as you attach to or
load a process, do so in the first chance breakpoint handler.
For more information regarding the Intel x86 debug registers and
hardware breakpoints see::
http://pdos.csail.mit.edu/6.828/2005/readings/ia32/IA32-3.pdf
Section 15.2
Alternatively, you can register a custom handler to handle hits on
the specific hw breakpoint slot.
        *Warning: Hardware breakpoints set during the first system
        breakpoint will be removed upon process continue. A better approach
        is to set a software breakpoint that, when hit, sets your hardware breakpoints.
@note: Hardware breakpoints are handled globally throughout
the entire process and not a single specific thread.
@see: bp_del_hw(), bp_del_hw_all()
@type address: DWORD
@param address: Address to set hardware breakpoint at
@type length: Integer (1, 2 or 4)
@param length: Size of hardware breakpoint in bytes (byte, word or dword)
@type condition: Integer (HW_ACCESS, HW_WRITE, HW_EXECUTE)
@param condition: Condition to set the hardware breakpoint to activate on
@type description: String
@param description: (Optional) Description of breakpoint
@type restore: Boolean
@param restore: (Optional, def=True) Flag controlling whether or not to restore the breakpoint
@type handler: Function Pointer
@param handler: (Optional, def=None) Optional handler to call for this bp instead of the default handler
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
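        Example (a minimal sketch; the address is hypothetical)::
            # break on any execution of the instruction at 0x00401000.
            pydbg.bp_set_hw(0x00401000, 1, HW_EXECUTE)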
"""
self._log('bp_set_hw(0x{:08x}, {}, {})'.format(address, length, condition))
# Instantiate a new hardware breakpoint object for the new bp to create.
hw_bp = HwBreakpoint(address, length, condition,
description, restore, handler=handler)
if length not in (1, 2, 4):
raise PDError('invalid hw breakpoint length: {}.'.format(length))
# length -= 1 because the following codes are used for determining length:
# 00 - 1 byte length
# 01 - 2 byte length
# 10 - undefined
# 11 - 4 byte length
length -= 1
# condition table:
# 00 - break on instruction execution only
# 01 - break on data writes only
# 10 - undefined
# 11 - break on data reads or writes but not instruction fetches
if condition not in (HW_ACCESS, HW_EXECUTE, HW_WRITE):
raise PDError('invalid hw breakpoint condition: {}'.format(condition))
# Check for any available hardware breakpoint slots.
# There doesn't appear to be any difference between local
        # and global as far as we are concerned on Windows.
#
# bits 0, 2, 4, 6 for local (L0 - L3)
# bits 1, 3, 5, 7 for global (G0 - G3)
#
        # We could programmatically search for an open slot
# in a given thread context with the following code:
#
# available = None
# for slot in xrange(4):
# if context.Dr7 & (1 << (slot * 2)) == 0:
# available = slot
# break
#
# But since we are doing global hardware breakpoints,
# we rely on ourselves for tracking open slots.
if 0 not in self.hardware_breakpoints:
available = 0
elif 1 not in self.hardware_breakpoints:
available = 1
elif 2 not in self.hardware_breakpoints:
available = 2
elif 3 not in self.hardware_breakpoints:
available = 3
else:
raise PDError('no hw breakpoint slots available.')
# activate the hardware breakpoint for all active threads.
for thread_id in self.enumerate_threads():
context = self.get_thread_context(thread_id=thread_id)
# mark available debug register as active (L0 - L3).
context.Dr7 |= 1 << (available * 2)
# save our breakpoint address to the available hw bp slot.
if available == 0:
context.Dr0 = address
elif available == 1:
context.Dr1 = address
elif available == 2:
context.Dr2 = address
elif available == 3:
context.Dr3 = address
# Set the condition (RW0 - RW3) field for the appropriate slot
            # (bits 16/17, 20/21, 24/25, 28/29)
context.Dr7 |= condition << ((available * 4) + 16)
# Set the length (LEN0-LEN3) field for the appropriate slot
# (bits 18/19, 22/23, 26/27, 30/31)
context.Dr7 |= length << ((available * 4) + 18)
# set the thread context.
self.set_thread_context(context, thread_id=thread_id)
# Update the internal hardware breakpoint array at the used slot index.
hw_bp.slot = available
self.hardware_breakpoints[available] = hw_bp
return self.ret_self()
def bp_set_mem(self, address, size, description="", handler=None):
"""Sets a memory breakpoint at the target address.
This is implemented by changing the permissions of the page
containing the address to PAGE_GUARD. To catch memory breakpoints
you have to register the EXCEPTION_GUARD_PAGE callback.
Within the callback handler check the internal pydbg variable
self.memory_breakpoint_hit to determine if the violation was a result
of a direct memory breakpoint hit or some unrelated event.
Alternatively, you can register a custom handler to handle
the memory breakpoint. Memory breakpoints are automatically restored
via the internal single step handler. To remove a memory breakpoint,
you must explicitly call bp_del_mem().
@see: bp_is_ours_mem(), bp_del_mem(), bp_del_mem_all()
@type address: DWORD
@param address: Starting address of the buffer to break on
@type size: Integer
@param size: Size of the buffer to break on
@type description: String
@param description: (Optional) Description to associate with this breakpoint
@type handler: Function Pointer
@param handler: (Optional, def=None) Optional handler to call for this bp instead of the default handler
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
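        Example (a minimal sketch; the address is hypothetical)::
            # break on any access to a 256 byte buffer.
            pydbg.bp_set_mem(0x00401000, 256, 'parse buffer')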
"""
self._log('bp_set_mem() buffer range is 0x{:08x} - 0x{:08x}'.format(address, address + size))
# ensure the target address doesn't already sit in a memory breakpoint range:
if self.bp_is_ours_mem(address):
self._log('a memory breakpoint spanning 0x{:08x} already exists'.format(address))
return self.ret_self()
# determine the base address of the page containing the starting point of our buffer.
try:
mbi = self.virtual_query(address)
except:
raise PDError('bp_set_mem(): failed querying address: 0x{:08x}'.format(address))
self._log('buffer starting at 0x{:08x} sits on page starting at 0x{:08x}'.format(address, mbi.BaseAddress))
# Individually change the page permissions for each page our buffer spans.
# Why do we individually set the page permissions of each page
        # as opposed to a range of pages? Because empirical testing shows
# that when you set a PAGE_GUARD on a range of pages,
# if any of those pages are accessed, then the PAGE_GUARD attribute
# is dropped for the entire range of pages that was originally modified.
# This is undesirable for our purposes when it comes to the ease of
# restoring hit memory breakpoints.
current_page = mbi.BaseAddress
while current_page <= address + size:
self._log('changing page permissions on 0x{:08x}'.format(current_page))
# Keep track of explicitly guarded pages,
# to differentiate from pages guarded by the debuggee / OS.
self._guarded_pages.add(current_page)
self.virtual_protect(current_page, 1, mbi.Protect | PAGE_GUARD)
current_page += self.page_size
# add the breakpoint to the internal list.
self.memory_breakpoints[address] = MemBreakpoint(address, size, mbi,
description, handler)
return self.ret_self()
def close_handle(self, handle):
"""
        Convenience wrapper around kernel32.CloseHandle()
@type handle: Handle
@param handle: Handle to close
@rtype: Bool
@return: Return value from CloseHandle().
"""
return kernel32.CloseHandle(handle)
def dbg_print_all_debug_registers(self):
"""
*** DEBUG ROUTINE ***
This is a debugging routine that was used when debugging hardware breakpoints.
It was too useful to be removed from the release code.
"""
# ensure we have an up to date context for the current thread.
context = self.get_thread_context(self.h_thread)
print('eip = 0x{:08x}'.format(context.Eip))
print('Dr0 = 0x{:08x}'.format(context.Dr0))
print('Dr1 = 0x{:08x}'.format(context.Dr1))
print('Dr2 = 0x{:08x}'.format(context.Dr2))
print('Dr3 = 0x{:08x}'.format(context.Dr3))
print('Dr7 = {!s}'.format(self.to_binary(context.Dr7)))
print(' 10987654321098765432109876543210')
print(' 332222222222111111111')
def dbg_print_all_guarded_pages(self):
"""
*** DEBUG ROUTINE ***
A debugging routine that was used when debugging memory breakpoints.
It was too useful to be removed from the release code.
"""
cursor = 0
# scan through the entire memory range.
while cursor < 0xFFFFFFFF:
try:
mbi = self.virtual_query(cursor)
except:
break
if mbi.Protect & PAGE_GUARD:
address = mbi.BaseAddress
print('PAGE GUARD on 0x{:08x}'.format(mbi.BaseAddress))
while True:
address += self.page_size
tmp_mbi = self.virtual_query(address)
if not tmp_mbi.Protect & PAGE_GUARD:
break
print('PAGE GUARD on 0x{:08x}'.format(address))
cursor += mbi.RegionSize
def debug_active_process(self, pid):
"""
        Convenience wrapper around DebugActiveProcess().
        Attaches the debugger to the process with the given PID.
You probably do not want to call this directly, rather look at attach().
@type pid: Integer
@param pid: Process ID to attach to
@raise PDError: An exception is raised on failure.
"""
if not kernel32.DebugActiveProcess(pid):
raise PDError('DebugActiveProcess({})'.format(pid), True)
def debug_event_iteration(self):
"""
Check for and process a debug event.
"""
continue_status = DBG_CONTINUE
dbg = DEBUG_EVENT()
# wait for a debug event.
if kernel32.WaitForDebugEvent(byref(dbg), 100):
# grab various information with regards to the current exception.
self.h_thread = self.open_thread(dbg.dwThreadId)
self.context = self.get_thread_context(self.h_thread)
self.dbg = dbg
self.exception_address = dbg.u.Exception.ExceptionRecord.ExceptionAddress
self.write_violation = dbg.u.Exception.ExceptionRecord.ExceptionInformation[0]
self.violation_address = dbg.u.Exception.ExceptionRecord.ExceptionInformation[1]
self.exception_code = dbg.u.Exception.ExceptionRecord.ExceptionCode
if dbg.dwDebugEventCode == CREATE_PROCESS_DEBUG_EVENT:
continue_status = self.event_handler_create_process()
elif dbg.dwDebugEventCode == CREATE_THREAD_DEBUG_EVENT:
continue_status = self.event_handler_create_thread()
elif dbg.dwDebugEventCode == EXIT_PROCESS_DEBUG_EVENT:
continue_status = self.event_handler_exit_process()
elif dbg.dwDebugEventCode == EXIT_THREAD_DEBUG_EVENT:
continue_status = self.event_handler_exit_thread()
elif dbg.dwDebugEventCode == LOAD_DLL_DEBUG_EVENT:
continue_status = self.event_handler_load_dll()
elif dbg.dwDebugEventCode == UNLOAD_DLL_DEBUG_EVENT:
continue_status = self.event_handler_unload_dll()
# an exception was caught.
elif dbg.dwDebugEventCode == EXCEPTION_DEBUG_EVENT:
ec = dbg.u.Exception.ExceptionRecord.ExceptionCode
self._log('debug_event_loop() exception: 0x{:08x}'.format(ec))
                # call the internal handler for the exception event that just occurred.
if ec == EXCEPTION_ACCESS_VIOLATION:
continue_status = self.exception_handler_access_violation()
elif ec == EXCEPTION_BREAKPOINT:
continue_status = self.exception_handler_breakpoint()
elif ec == EXCEPTION_GUARD_PAGE:
continue_status = self.exception_handler_guard_page()
elif ec == EXCEPTION_SINGLE_STEP:
continue_status = self.exception_handler_single_step()
# generic callback support.
elif ec in self.callbacks:
continue_status = self.callbacks[ec](self)
# unhandled exception.
else:
self._log('TID:0x{:04x} caused an unhandled exception'
' (0x{:08x}) at 0x{:08x}'.format(self.dbg.dwThreadId, ec, self.exception_address))
continue_status = DBG_EXCEPTION_NOT_HANDLED
# If the memory space of the debuggee was tainted, flush the instruction cache.
# from MSDN: Applications should call FlushInstructionCache
# if they generate or modify code in memory.
# The CPU cannot detect the change,
# and may execute the old code it cached.
if self.dirty:
kernel32.FlushInstructionCache(self.h_process, 0, 0)
# Close the opened thread handle and resume executing the thread
# that triggered the debug event.
self.close_handle(self.h_thread)
kernel32.ContinueDebugEvent(dbg.dwProcessId, dbg.dwThreadId, continue_status)
def debug_event_loop(self):
"""
Enter the infinite debug event handling loop.
This is the main loop of the debugger and is responsible for
catching debug events and exceptions and dispatching them appropriately.
This routine will check for and call
the USER_CALLBACK_DEBUG_EVENT callback on each loop iteration.
run() is an alias for this routine.
@see: run()
@raise PDError: An exception is raised on any exceptional conditions,
        such as the debugger being interrupted or the debuggee quitting.
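        Example (a minimal sketch; the PID is hypothetical)::
            pydbg.attach(1234)
            pydbg.debug_event_loop()   # blocks until detach or debuggee exit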
"""
while self.debugger_active:
# don't let the user interrupt us in the midst of handling a debug event.
            def_sigint_handler = None
            try:
                def_sigint_handler = signal.signal(signal.SIGINT, self.sigint_handler)
except:
pass
# if a user callback was specified, call it.
if USER_CALLBACK_DEBUG_EVENT in self.callbacks:
# user callbacks do not / should not access debugger or contextual information.
self.dbg = None
self.context = None
self.callbacks[USER_CALLBACK_DEBUG_EVENT](self)
# iterate through a debug event.
self.debug_event_iteration()
            # resume keyboard interruptibility.
if def_sigint_handler:
signal.signal(signal.SIGINT, def_sigint_handler)
# close the global process handle.
self.close_handle(self.h_process)
def debug_set_process_kill_on_exit(self, kill_on_exit):
"""
Convenience wrapper around DebugSetProcessKillOnExit().
@type kill_on_exit: Bool
@param kill_on_exit: True to kill the process on debugger exit,
False to let debuggee continue running.
@raise PDError: An exception is raised on failure.
"""
if not kernel32.DebugSetProcessKillOnExit(kill_on_exit):
raise PDError("DebugActiveProcess(%s)" % kill_on_exit, True)
def detach(self):
"""
Detach from debuggee.
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('detaching from debuggee')
# remove all software, memory and hardware breakpoints.
self.bp_del_all()
self.bp_del_mem_all()
self.bp_del_hw_all()
# Try to detach from the target process
# if the API is available on the current platform.
kernel32.DebugActiveProcessStop(self.pid)
self.set_debugger_active(False)
return self.ret_self()
def disasm(self, address):
"""Pydasm disassemble utility function wrapper.
Stores the pydasm decoded instruction in self.instruction.
@type address: DWORD
@param address: Address to disassemble at
@rtype: String
@return: Disassembled string.
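        Example (a minimal sketch; the address is hypothetical)::
            print(pydbg.disasm(0x00401000))   # e.g. 'mov eax, ebx'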
"""
        # TODO: should replace pydasm with capstone
try:
data = self.read_process_memory(address, 32)
except:
return 'Unable to disassemble at 0x{:08x}'.format(address)
# update our internal member variables.
self.instruction = pydasm.get_instruction(data, pydasm.MODE_32)
if not self.instruction:
self.mnemonic = '[UNKNOWN]'
self.op1 = ''
self.op2 = ''
self.op3 = ''
return '[UNKNOWN]'
else:
self.mnemonic = pydasm.get_mnemonic_string(self.instruction, pydasm.FORMAT_INTEL)
self.op1 = pydasm.get_operand_string(self.instruction, 0, pydasm.FORMAT_INTEL, address)
self.op2 = pydasm.get_operand_string(self.instruction, 1, pydasm.FORMAT_INTEL, address)
self.op3 = pydasm.get_operand_string(self.instruction, 2, pydasm.FORMAT_INTEL, address)
# the rstrip() is for removing extraneous trailing whitespace that libdasm sometimes leaves.
return pydasm.get_instruction_string(self.instruction, pydasm.FORMAT_INTEL, address).rstrip(' ')
def disasm_around(self, address, num_inst=5):
"""
Given a specified address this routine will return
the list of 5 instructions before and after the instruction at address
(including the instruction at address, so 11 instructions in total).
This is accomplished by grabbing a larger chunk of data
around the address than what is predicted as necessary
and then disassembling forward.
If during the forward disassembly the requested address lines up with
the start of an instruction, then the assumption is made
        that the forward disassembly self-corrected and
the instruction set is returned. If we are unable to align with
the original address, then we modify our data slice
and try again until we do.
@type address: DWORD
@param address: Address to disassemble around
@type num_inst: Integer
@param num_inst: (Optional, Def=5) Number of instructions to disassemble up/down from address
@rtype: List
@return: List of tuples (address, disassembly) of instructions around the specified address.
"""
        # TODO: should replace pydasm with capstone
if num_inst == 0:
return [(address, self.disasm(address))]
if not isinstance(num_inst, int) or num_inst < 0:
            self._err('disasm_around called with an invalid window size. returning error value')
return [(address, 'invalid window size supplied')]
# Grab a safe window size of bytes.
        window_size = (num_inst * 64) // 5
# Grab a window of bytes before and after the requested address.
try:
data = self.read_process_memory(address - window_size, window_size * 2)
except:
return [(address, 'Unable to disassemble')]
# The rstrip() is for removing extraneous trailing whitespace
# that libdasm sometimes leaves.
i = pydasm.get_instruction(data[window_size:], pydasm.MODE_32)
disassembly = pydasm.get_instruction_string(i, pydasm.FORMAT_INTEL, address).rstrip(" ")
complete = False
start_byte = 0
# Loop until we retrieve a set of instructions that align to the requested address.
while not complete:
instructions = []
            data_slice = data[start_byte:]
            offset = 0
            # Step through the bytes in the data slice.
            while offset < len(data_slice):
                i = pydasm.get_instruction(data_slice[offset:], pydasm.MODE_32)
if not i:
break
# Calculate the actual address of the instruction
# at the current offset and grab the disassembly
addr = address - window_size + start_byte + offset
inst = pydasm.get_instruction_string(i, pydasm.FORMAT_INTEL, addr).rstrip(" ")
# Add the address / instruction pair to our list of tuples.
instructions.append((addr, inst))
# Increment the offset into the data slice by the length
# of the current instruction.
offset += i.length
# We're done processing a data slice.
            # Step through each address / instruction tuple in our
# instruction list looking for an instruction alignment match.
# we do the match on address and the original disassembled instruction.
index_of_address = 0
for (addr, inst) in instructions:
if addr == address and inst == disassembly:
complete = True
break
index_of_address += 1
start_byte += 1
return instructions[index_of_address-num_inst:index_of_address+num_inst+1]
def dump_context(self, context=None, stack_depth=5, print_dots=True):
"""
Return an informational block of text describing the CPU context of
the current thread. Information includes:
- Disassembly at current EIP
- Register values in hex, decimal and "smart" dereferenced
- ESP, ESP+4, ESP+8 ... values in hex, decimal and "smart" dereferenced
@see: dump_context_list()
@type context: Context
@param context: (Optional) Current thread context to examine
@type stack_depth: Integer
@param stack_depth: (Optional, def:5) Number of dwords to dereference off of the stack (not including ESP)
@type print_dots: Bool
@param print_dots: (Optional, def:True) Controls suppression of dot in place of non-printable
@rtype: String
@return: Information about current thread context.
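        Example (a minimal sketch of use inside an access violation callback)::
            def av_handler(dbg):
                print(dbg.dump_context())
                return DBG_CONTINUE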
"""
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
context_list = self.dump_context_list(context, stack_depth, print_dots)
context_dump = 'CONTEXT DUMP\n'
context_dump += ' EIP: %08x %s\n' % (context.Eip, context_list['eip'])
context_dump += ' EAX: %08x (%10d) -> %s\n' % (context.Eax, context.Eax, context_list['eax'])
context_dump += ' EBX: %08x (%10d) -> %s\n' % (context.Ebx, context.Ebx, context_list['ebx'])
context_dump += ' ECX: %08x (%10d) -> %s\n' % (context.Ecx, context.Ecx, context_list['ecx'])
context_dump += ' EDX: %08x (%10d) -> %s\n' % (context.Edx, context.Edx, context_list['edx'])
context_dump += ' EDI: %08x (%10d) -> %s\n' % (context.Edi, context.Edi, context_list['edi'])
context_dump += ' ESI: %08x (%10d) -> %s\n' % (context.Esi, context.Esi, context_list['esi'])
context_dump += ' EBP: %08x (%10d) -> %s\n' % (context.Ebp, context.Ebp, context_list['ebp'])
context_dump += ' ESP: %08x (%10d) -> %s\n' % (context.Esp, context.Esp, context_list['esp'])
for offset in range(0, stack_depth + 1):
context_dump += ' +%02x: %08x (%10d) -> %s\n' % (
offset * 4,
context_list['esp+%02x'%(offset*4)]['value'],
context_list['esp+%02x'%(offset*4)]['value'],
context_list['esp+%02x'%(offset*4)]['desc']
)
return context_dump
def dump_context_list(self, context=None, stack_depth=5, print_dots=True, hex_dump=False):
"""
Return an informational list of items describing the CPU context of
the current thread. Information includes:
- Disassembly at current EIP
- Register values in hex, decimal and "smart" dereferenced
- ESP, ESP+4, ESP+8 ... values in hex, decimal and "smart" dereferenced
@see: dump_context()
@type context: Context
@param context: (Optional) Current thread context to examine
@type stack_depth: Integer
@param stack_depth: (Optional, def:5) Number of dwords to dereference off of the stack (not including ESP)
@type print_dots: Bool
@param print_dots: (Optional, def:True) Controls suppression of dot in place of non-printable
@type hex_dump: Bool
        @param hex_dump: (Optional, def=False) Return a hex dump in the absence of string detection
@rtype: Dictionary
@return: Dictionary of information about current thread context.
"""
# If the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
context_list = {}
context_list['eip'] = self.disasm(context.Eip)
context_list['eax'] = self.smart_dereference(context.Eax, print_dots, hex_dump)
context_list['ebx'] = self.smart_dereference(context.Ebx, print_dots, hex_dump)
context_list['ecx'] = self.smart_dereference(context.Ecx, print_dots, hex_dump)
context_list['edx'] = self.smart_dereference(context.Edx, print_dots, hex_dump)
context_list['edi'] = self.smart_dereference(context.Edi, print_dots, hex_dump)
context_list['esi'] = self.smart_dereference(context.Esi, print_dots, hex_dump)
context_list['ebp'] = self.smart_dereference(context.Ebp, print_dots, hex_dump)
context_list['esp'] = self.smart_dereference(context.Esp, print_dots, hex_dump)
for offset in range(0, stack_depth + 1):
try:
esp = self.flip_endian_dword(self.read_process_memory(context.Esp + offset * 4, 4))
context_list['esp+%02x'%(offset*4)] = {}
context_list['esp+%02x'%(offset*4)]['value'] = esp
context_list['esp+%02x'%(offset*4)]['desc'] = self.smart_dereference(esp, print_dots, hex_dump)
except:
context_list['esp+%02x'%(offset*4)] = {}
context_list['esp+%02x'%(offset*4)]['value'] = 0
context_list['esp+%02x'%(offset*4)]['desc'] = '[INVALID]'
return context_list
def enumerate_modules(self):
"""
Enumerate and return the list of module name / base address tuples that
belong to the debuggee by using the CreateToolhelp32Snapshot() API.
@see: iterate_modules()
@rtype: List
@return: List of module name / base address tuples.
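        Example (a minimal sketch)::
            for (name, base) in pydbg.enumerate_modules():
                print(name, hex(base))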
"""
self._log('enumerate_modules()')
module = MODULEENTRY32()
module_list = []
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, self.pid)
if snapshot == INVALID_HANDLE_VALUE:
            raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, {})'.format(self.pid), True)
# We *must* set the size of the structure prior to using it,
# otherwise Module32First() will fail.
module.dwSize = sizeof(module)
found_mod = kernel32.Module32First(snapshot, byref(module))
while found_mod:
module_list.append((module.szModule, module.modBaseAddr))
found_mod = kernel32.Module32Next(snapshot, byref(module))
self.close_handle(snapshot)
return module_list
def enumerate_processes(self):
"""
Enumerate all system processes returning a list of pid / process name
tuples by using the CreateToolhelp32Snapshot() API.
@see: iterate_processes()
@rtype: List
@return: List of pid / process name tuples.
Example::
for (pid, name) in pydbg.enumerate_processes():
if name == "test.exe":
break
pydbg.attach(pid)
"""
self._log('enumerate_processes()')
pe = PROCESSENTRY32()
process_list = []
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
if snapshot == INVALID_HANDLE_VALUE:
raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)', True)
# We *must* set the size of the structure prior to using it,
# otherwise Process32First() will fail.
pe.dwSize = sizeof(PROCESSENTRY32)
found_proc = kernel32.Process32First(snapshot, byref(pe))
while found_proc:
process_list.append((pe.th32ProcessID, pe.szExeFile))
found_proc = kernel32.Process32Next(snapshot, byref(pe))
self.close_handle(snapshot)
return process_list
def enumerate_threads(self):
"""
Enumerate all system threads returning a list of thread IDs that
belong to the debuggee by using the CreateToolhelp32Snapshot() API.
@see: iterate_threads()
@rtype: List
@return: List of thread IDs belonging to the debuggee.
Example::
for thread_id in self.enumerate_threads():
context = self.get_thread_context(None, thread_id)
"""
self._log('enumerate_threads()')
thread_entry = THREADENTRY32()
debuggee_threads = []
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, self.pid)
if snapshot == INVALID_HANDLE_VALUE:
raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, {})'.format(self.pid), True)
# We *must* set the size of the structure prior to using it,
# otherwise Thread32First() will fail.
thread_entry.dwSize = sizeof(thread_entry)
success = kernel32.Thread32First(snapshot, byref(thread_entry))
while success:
if thread_entry.th32OwnerProcessID == self.pid:
debuggee_threads.append(thread_entry.th32ThreadID)
success = kernel32.Thread32Next(snapshot, byref(thread_entry))
self.close_handle(snapshot)
return debuggee_threads
def event_handler_create_process(self):
"""
This is the default CREATE_PROCESS_DEBUG_EVENT handler.
@rtype: DWORD
@return: Debug event continue status.
"""
self._log('event_handler_create_process()')
# don't need this.
self.close_handle(self.dbg.u.CreateProcessInfo.hFile)
if not self.follow_forks:
return DBG_CONTINUE
if CREATE_PROCESS_DEBUG_EVENT in self.callbacks:
return self.callbacks[CREATE_PROCESS_DEBUG_EVENT](self)
return DBG_CONTINUE
def event_handler_create_thread(self):
"""
This is the default CREATE_THREAD_DEBUG_EVENT handler.
@rtype: DWORD
@return: Debug event continue status.
"""
self._log('event_handler_create_thread({})'.format(self.dbg.dwThreadId))
        # resolve the newly created thread's TEB and add it to the internal dictionary.
thread_id = self.dbg.dwThreadId
thread_handle = self.dbg.u.CreateThread.hThread
thread_context = self.get_thread_context(thread_handle)
selector_entry = LDT_ENTRY()
if not kernel32.GetThreadSelectorEntry(thread_handle, thread_context.SegFs, byref(selector_entry)):
self.win32_error('GetThreadSelectorEntry()')
teb = selector_entry.BaseLow
teb += (selector_entry.HighWord.Bits.BaseMid << 16) + (selector_entry.HighWord.Bits.BaseHi << 24)
# add this TEB to the internal dictionary.
self.tebs[thread_id] = teb
# apply any existing hardware breakpoints to this new thread.
for slot, hw_bp in self.hardware_breakpoints.items():
# mark available debug register as active (L0 - L3).
thread_context.Dr7 |= 1 << (slot * 2)
# save our breakpoint address to the available hw bp slot.
if slot == 0:
thread_context.Dr0 = hw_bp.address
elif slot == 1:
thread_context.Dr1 = hw_bp.address
elif slot == 2:
thread_context.Dr2 = hw_bp.address
elif slot == 3:
thread_context.Dr3 = hw_bp.address
# set the condition (RW0 - RW3) field for the appropriate slot
            # (bits 16/17, 20/21, 24/25, 28/29)
thread_context.Dr7 |= hw_bp.condition << ((slot * 4) + 16)
# set the length (LEN0-LEN3) field for the appropriate slot
# (bits 18/19, 22/23, 26/27, 30/31)
thread_context.Dr7 |= hw_bp.length << ((slot * 4) + 18)
# set the thread context.
self.set_thread_context(thread_context, thread_id=thread_id)
# pass control to user defined callback.
if CREATE_THREAD_DEBUG_EVENT in self.callbacks:
return self.callbacks[CREATE_THREAD_DEBUG_EVENT](self)
return DBG_CONTINUE
def event_handler_exit_process(self):
"""
This is the default EXIT_PROCESS_DEBUG_EVENT handler.
@raise PDError: An exception is raised to denote process exit.
"""
self.set_debugger_active(False)
if EXIT_PROCESS_DEBUG_EVENT in self.callbacks:
return self.callbacks[EXIT_PROCESS_DEBUG_EVENT](self)
return DBG_CONTINUE
def event_handler_exit_thread(self):
"""
This is the default EXIT_THREAD_DEBUG_EVENT handler.
@rtype: DWORD
@return: Debug event continue status.
"""
# before we remove the TEB entry from our internal list,
# let's give the user a chance to do something with it.
if EXIT_THREAD_DEBUG_EVENT in self.callbacks:
continue_status = self.callbacks[EXIT_THREAD_DEBUG_EVENT](self)
else:
continue_status = DBG_CONTINUE
# remove the TEB entry for the exiting thread id.
if self.dbg.dwThreadId in self.tebs:
del(self.tebs[self.dbg.dwThreadId])
return continue_status
def event_handler_load_dll(self):
"""
This is the default LOAD_DLL_DEBUG_EVENT handler.
You can access the last loaded dll in your callback handler
with the following example code::
last_dll = pydbg.get_system_dll(-1)
print "loading:%s from %s into:%08x size:%d" % (last_dll.name, last_dll.path, last_dll.base, last_dll.size)
The get_system_dll() routine is preferred over directly accessing
the internal data structure for proper and transparent client/server support.
@rtype: DWORD
@return: Debug event continue status.
"""
dll = SystemDLL(self.dbg.u.LoadDll.hFile, self.dbg.u.LoadDll.lpBaseOfDll)
self.system_dlls.append(dll)
if LOAD_DLL_DEBUG_EVENT in self.callbacks:
return self.callbacks[LOAD_DLL_DEBUG_EVENT](self)
return DBG_CONTINUE
def event_handler_unload_dll(self):
"""
This is the default UNLOAD_DLL_DEBUG_EVENT handler.
@rtype: DWORD
@return: Debug event continue status.
"""
base = self.dbg.u.UnloadDll.lpBaseOfDll
unloading = None
for system_dll in self.system_dlls:
if system_dll.base == base:
unloading = system_dll
break
# before we remove the system dll from our internal list,
# let's give the user a chance to do something with it.
if UNLOAD_DLL_DEBUG_EVENT in self.callbacks:
continue_status = self.callbacks[UNLOAD_DLL_DEBUG_EVENT](self)
else:
continue_status = DBG_CONTINUE
if not unloading:
#raise pdx("Unable to locate DLL that is being unloaded from %08x" % base, False)
pass
else:
# close the open file handle to the system dll being unloaded.
self.close_handle(unloading.handle)
# remove the system dll from the internal list.
self.system_dlls.remove(unloading)
del(unloading)
return continue_status
def exception_handler_access_violation(self):
"""
This is the default EXCEPTION_ACCESS_VIOLATION handler.
Responsible for handling the access violation and
passing control to the registered user callback handler.
        @attention: If you catch an access violation and wish to terminate the process,
you *must* still return DBG_CONTINUE to avoid a deadlock.
@rtype: DWORD
@return: Debug event continue status.
"""
if EXCEPTION_ACCESS_VIOLATION in self.callbacks:
return self.callbacks[EXCEPTION_ACCESS_VIOLATION](self)
return DBG_EXCEPTION_NOT_HANDLED
def exception_handler_breakpoint(self):
"""
This is the default EXCEPTION_BREAKPOINT handler,
responsible for transparently restoring soft breakpoints
and passing control to the registered user callback handler.
@rtype: DWORD
@return: Debug event continue status.
"""
self._log('pydbg.exception_handler_breakpoint() at 0x{:08x} from thread id {}'.format(self.exception_address, self.dbg.dwThreadId))
# breakpoints we did not set.
if not self.bp_is_ours(self.exception_address):
# system breakpoints.
if self.exception_address == self.system_break:
# pass control to user registered call back.
if EXCEPTION_BREAKPOINT in self.callbacks:
continue_status = self.callbacks[EXCEPTION_BREAKPOINT](self)
else:
continue_status = DBG_CONTINUE
if self.first_breakpoint:
self._log('first windows driven system breakpoint at 0x{:08x}'.format(self.exception_address))
self.first_breakpoint = False
# ignore all other breakpoints we didn't explicitly set.
else:
self._log('breakpoint not ours 0x{:08x}'.format(self.exception_address))
continue_status = DBG_EXCEPTION_NOT_HANDLED
# breakpoints we did set.
else:
# restore the original byte at the breakpoint address.
self._log('restoring original byte at 0x{:08x}'.format(self.exception_address))
self.write_process_memory(self.exception_address, self.breakpoints[self.exception_address].original_byte)
self.set_attr('dirty', True)
# before we can continue, we have to correct the value of EIP.
# The reason for this is that the 1-byte INT 3
# we inserted causes EIP to "slide" + 1 into
# the original instruction and must be reset.
self.set_register('EIP', self.exception_address)
self.context.Eip -= 1
# if there is a specific handler registered for this bp,
# pass control to it.
if self.breakpoints[self.exception_address].handler:
self._log('calling user handler')
continue_status = self.breakpoints[self.exception_address].handler(self)
# pass control to default user registered call back handler,
# if it is specified.
elif EXCEPTION_BREAKPOINT in self.callbacks:
continue_status = self.callbacks[EXCEPTION_BREAKPOINT](self)
else:
continue_status = DBG_CONTINUE
# if the breakpoint still exists,
# ie: the user didn't erase it during the callback,
# and the breakpoint is flagged for restore,
# then tell the single step handler about it.
# Furthermore, check if the debugger is still active,
# that way we don't try and single step if the user requested a detach.
if self.get_attr('debugger_active') and self.exception_address in self.breakpoints:
if self.breakpoints[self.exception_address].restore:
self._restore_breakpoint = self.breakpoints[self.exception_address]
self.single_step(True)
self.bp_del(self.exception_address)
return continue_status
def exception_handler_guard_page(self):
"""
This is the default EXCEPTION_GUARD_PAGE handler,
        responsible for transparently restoring memory breakpoints and
        passing control to the registered user callback handler.
@rtype: DWORD
@return: Debug event continue status.
"""
self._log('pydbg.exception_handler_guard_page()')
# determine the base address of the page where the offending reference resides.
mbi = self.virtual_query(self.violation_address)
# if the hit is on a page we did not explicitly GUARD,
# then pass the violation to the debuggee.
if mbi.BaseAddress not in self._guarded_pages:
return DBG_EXCEPTION_NOT_HANDLED
# determine if the hit was within a monitored buffer,
# or simply on the same page.
self.memory_breakpoint_hit = self.bp_is_ours_mem(self.violation_address)
# grab the actual memory breakpoint object, for the hit breakpoint.
if self.memory_breakpoint_hit:
self._log('direct hit on memory breakpoint at 0x{:08x}'.format(self.memory_breakpoint_hit))
if self.write_violation:
self._log('write violation from 0x{:08x} on 0x{:08x} of mem bp'.format(self.exception_address, self.violation_address))
else:
self._log('read violation from 0x{:08x} on 0x{:08x} of mem bp'.format(self.exception_address, self.violation_address))
# if there is a specific handler registered for this bp,
# pass control to it.
if self.memory_breakpoint_hit and self.memory_breakpoints[self.memory_breakpoint_hit].handler:
continue_status = self.memory_breakpoints[self.memory_breakpoint_hit].handler(self)
# pass control to default user registered call back handler,
# if it is specified.
elif EXCEPTION_GUARD_PAGE in self.callbacks:
continue_status = self.callbacks[EXCEPTION_GUARD_PAGE](self)
else:
continue_status = DBG_CONTINUE
# if the hit page is still in our list of explicitly guarded pages,
# ie: the user didn't erase it during the
# callback, then tell the single step handler about it. furthermore,
# check if the debugger is still active,
# that way we don't try and single step if the user requested a detach.
if self.get_attr('debugger_active') and mbi.BaseAddress in self._guarded_pages:
self._restore_breakpoint = MemBreakpoint(None, None, mbi, None)
self.single_step(True)
return continue_status
def exception_handler_single_step(self):
"""
This is the default EXCEPTION_SINGLE_STEP handler,
responsible for transparently restoring breakpoints and
passing control to the registered user callback handler.
@rtype: DWORD
@return: Debug event continue status.
"""
self._log('pydbg.exception_handler_single_step()')
# if there is a breakpoint to restore.
if self._restore_breakpoint:
bp = self._restore_breakpoint
# restore a soft breakpoint.
if isinstance(bp, Breakpoint):
self._log('restoring breakpoint at 0x{:08x}'.format(bp.address))
self.bp_set(bp.address, bp.description, bp.restore, bp.handler)
# restore PAGE_GUARD for a memory breakpoint
# (make sure guards are not temporarily suspended).
elif isinstance(bp, MemBreakpoint) and self._guards_active:
self._log('restoring 0x{:08x} +PAGE_GUARD on page based @ 0x{:08x}'.format(bp.mbi.Protect, bp.mbi.BaseAddress))
self.virtual_protect(bp.mbi.BaseAddress, 1, bp.mbi.Protect | PAGE_GUARD)
# restore a hardware breakpoint.
elif isinstance(bp, HwBreakpoint):
self._log('restoring hardware breakpoint on 0x{:08x}'.format(bp.address))
self.bp_set_hw(bp.address, bp.length, bp.condition, bp.description, bp.restore, bp.handler)
        # Determine if this single step event occurred in reaction to
        # a hardware breakpoint and grab the hit breakpoint.
        # According to the Intel docs, we should be able to check for
        # the BS flag in Dr6, but it appears that Windows isn't properly
        # propagating that flag down to us.
if self.context.Dr6 & 0x1 and 0 in self.hardware_breakpoints:
self.hardware_breakpoint_hit = self.hardware_breakpoints[0]
elif self.context.Dr6 & 0x2 and 1 in self.hardware_breakpoints:
self.hardware_breakpoint_hit = self.hardware_breakpoints[1]
elif self.context.Dr6 & 0x4 and 2 in self.hardware_breakpoints:
self.hardware_breakpoint_hit = self.hardware_breakpoints[2]
elif self.context.Dr6 & 0x8 and 3 in self.hardware_breakpoints:
self.hardware_breakpoint_hit = self.hardware_breakpoints[3]
# if we are dealing with a hardware breakpoint and
# there is a specific handler registered, pass control to it.
if self.hardware_breakpoint_hit and self.hardware_breakpoint_hit.handler:
continue_status = self.hardware_breakpoint_hit.handler(self)
# pass control to default user registered call back handler, if it is specified.
elif EXCEPTION_SINGLE_STEP in self.callbacks:
continue_status = self.callbacks[EXCEPTION_SINGLE_STEP](self)
# if we single stepped to handle a breakpoint restore.
elif self._restore_breakpoint:
continue_status = DBG_CONTINUE
            # macOS compatibility.
            # need to clear the TRAP flag for macOS.
            # This doesn't hurt Windows aside from a negligible speed hit.
context = self.get_thread_context(self.h_thread)
context.EFlags &= ~EFLAGS_TRAP
self.set_thread_context(context)
else:
continue_status = DBG_EXCEPTION_NOT_HANDLED
# If we are handling a hardware breakpoint hit and it still exists,
# ie: the user didn't erase it during the callback, and the breakpoint
# is flagged for restore, then tell the single step handler about it.
# Furthermore, check if the debugger is still active,
# that way we don't try and single step if the user requested a detach.
if self.hardware_breakpoint_hit is not None and self.get_attr('debugger_active'):
slot = self.hardware_breakpoint_hit.slot
if slot in self.hardware_breakpoints:
curr = self.hardware_breakpoints[slot]
prev = self.hardware_breakpoint_hit
if curr.address == prev.address:
if prev.restore:
self._restore_breakpoint = prev
self.single_step(True)
self.bp_del_hw(slot=prev.slot)
# Reset the hardware breakpoint hit flag and restore breakpoint variable
self.hardware_breakpoint_hit = None
self._restore_breakpoint = None
return continue_status
def func_resolve(self, dll, function):
"""
Utility function that resolves the address of
a given module / function name pair under the context of the debugger.
@see: func_resolve_debuggee()
@type dll: String
@param dll: Name of the DLL (case-insensitive)
@type function: String
@param function: Name of the function to resolve (case-sensitive)
@rtype: DWORD
@return: Address
"""
handle = kernel32.LoadLibraryA(dll)
address = kernel32.GetProcAddress(handle, function)
kernel32.FreeLibrary(handle)
return address
def func_resolve_debuggee(self, dll_name, func_name):
"""
Utility function that resolves the address of
a given module / function name pair under the context of the debuggee.
        Note: Be wary of calling this function from within a LOAD_DLL handler
as the module is not yet fully loaded and therefore the snapshot will not include it.
@author: Otto Ebeling
@see: func_resolve()
@todo: Add support for followed imports.
@type dll_name: String
@param dll_name: Name of the DLL (case-insensitive, ex:ws2_32.dll)
@type func_name: String
@param func_name: Name of the function to resolve (case-sensitive)
@rtype: DWORD
@return: Address of the symbol in the target process address space if it can be resolved, None otherwise
"""
dll_name = dll_name.lower()
# we can't make the assumption that all DLL names end in .dll,
# for example Quicktime libs end in .qtx / .qts
# so instead of this old line:
# if not dll_name.endswith(".dll"):
        # we'll check for the presence of a dot and will add .dll as a convenience.
if not dll_name.count('.'):
dll_name += '.dll'
for module in self.iterate_modules():
if module.szModule.lower() == dll_name:
base_address = module.modBaseAddr
dos_header = self.read_process_memory(base_address, 0x40)
# Check validity of DOS header
if len(dos_header) != 0x40 or dos_header[:2] != 'MZ':
continue
e_lfanew = struct.unpack('<I', dos_header[0x3c:0x40])[0]
pe_headers = self.read_process_memory(base_address + e_lfanew, 0xF8)
# Check validity of PE headers
if len(pe_headers) != 0xF8 or pe_headers[:2] != 'PE':
continue
export_directory_rva = struct.unpack('<I', pe_headers[0x78:0x7C])[0]
export_directory_len = struct.unpack('<I', pe_headers[0x7C:0x80])[0]
export_directory = self.read_process_memory(base_address + export_directory_rva, export_directory_len)
num_of_functions = struct.unpack('<I', export_directory[0x14:0x18])[0]
num_of_names = struct.unpack('<I', export_directory[0x18:0x1C])[0]
address_of_functions = struct.unpack('<I', export_directory[0x1C:0x20])[0]
address_of_names = struct.unpack('<I', export_directory[0x20:0x24])[0]
address_of_ordinals = struct.unpack('<I', export_directory[0x24:0x28])[0]
name_table = self.read_process_memory(base_address + address_of_names, num_of_names * 4)
# Perform a binary search across the function names.
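                # (the export name pointer table is lexicographically sorted
                # per the PE/COFF spec, which is what makes binary search valid here.)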
low = 0
high = num_of_names
while low <= high:
# Python does not suffer from integer overflows:
# http://googleresearch.blogspot.com/2006/06/extra-extra-read-all-about-it-nearly.html
                    middle = (low + high) // 2
current_address = base_address + struct.unpack('<I', name_table[middle*4:(middle+1)*4])[0]
# We use a crude approach here:
# Read 256 bytes and cut on NULL char. not very beautiful,
# but reading 1 byte at a time is very slow.
name_buffer = self.read_process_memory(current_address, 256)
name_buffer = name_buffer[:name_buffer.find('\0')]
if name_buffer < func_name:
low = middle + 1
elif name_buffer > func_name:
high = middle - 1
else:
# MSFT documentation is misleading - see http://www.bitsum.com/pedocerrors.htm
bin_ordinal = self.read_process_memory(base_address + address_of_ordinals + middle * 2, 2)
ordinal = struct.unpack('<H', bin_ordinal)[0] # ordinalBase has already been subtracted
bin_func_address = self.read_process_memory(base_address + address_of_functions + ordinal * 4, 4)
function_address = struct.unpack('<I', bin_func_address)[0]
return base_address + function_address
# function was not found.
return None
# module was not found.
return None
def get_ascii_string(self, data):
"""
Retrieve the ASCII string, if any, from data.
Ensure that the string is valid by checking against the minimum
length requirement defined in self.STRING_EXPLORATION_MIN_LENGTH.
@type data: Raw
@param data: Data to explore for printable ascii string
@rtype: String
@return: False on failure, ascii string on discovered string.
"""
discovered = ''
for char in data:
# if we've hit a non printable char, break
if ord(char) < 32 or ord(char) > 126:
break
discovered += char
if len(discovered) < self.STRING_EXPLORATION_MIN_LENGTH:
return False
return discovered
def get_arg(self, index, context=None):
"""
Given a thread context, this convenience routine will retrieve
the function argument at the specified index.
The return address of the function can be retrieved by specifying
an index of 0. This routine should be called from breakpoint handlers
at the top of a function.
@type index: Integer
@param index: Data to explore for printable ascii string
@type context: Context
@param context: (Optional) Current thread context to examine
@rtype: DWORD
@return: Value of specified argument.
"""
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
arg_val = self.read_process_memory(context.Esp + index * 4, 4)
arg_val = self.flip_endian_dword(arg_val)
return arg_val
def get_attr(self, attribute):
"""
Return the value for the specified class attribute.
This routine should be used over directly accessing class member variables
for transparent support across local vs. client/server debugger clients.
@see: set_attr()
@type attribute: String
@param attribute: Name of attribute to return.
@rtype: Mixed
@return: Requested attribute or None if not found.
"""
if not hasattr(self, attribute):
return None
return getattr(self, attribute)
def get_debug_privileges(self):
"""
Obtain necessary privileges for debugging.
@raise PDError: An exception is raised on failure.
"""
h_token = HANDLE()
luid = LUID()
token_state = TOKEN_PRIVILEGES()
self._log('get_debug_privileges()')
if not advapi32.OpenProcessToken(kernel32.GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES, byref(h_token)):
raise PDError('OpenProcessToken()', True)
if not advapi32.LookupPrivilegeValueA(0, 'seDebugPrivilege', byref(luid)):
raise PDError('LookupPrivilegeValue()', True)
token_state.PrivilegeCount = 1
token_state.Privileges[0].Luid = luid
token_state.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED
if not advapi32.AdjustTokenPrivileges(h_token, 0, byref(token_state), 0, 0, 0):
raise PDError('AdjustTokenPrivileges()', True)
def get_instruction(self, address):
"""
Pydasm disassemble utility function wrapper.
Returns the pydasm decoded instruction in self.instruction.
@type address: DWORD
@param address: Address to disassemble at
@rtype: pydasm instruction
@return: pydasm instruction
"""
try:
data = self.read_process_memory(address, 32)
except:
return 'Unable to disassemble at 0x{:08x}'.format(address)
return pydasm.get_instruction(data, pydasm.MODE_32)
def get_printable_string(self, data, print_dots=True):
"""
        Retrieve the printable characters from data, optionally replacing
        non-printable characters with dots.
@type data: Raw
@param data: Data to explore for printable ascii string
@type print_dots: Bool
@param print_dots: (Optional, def:True) Controls suppression of dot in place of non-printable
@rtype: String
@return: False on failure, discovered printable chars in string otherwise.
"""
discovered = ''
for char in data:
if 32 <= ord(char) <= 126:
discovered += char
elif print_dots:
discovered += '.'
return discovered
def get_register(self, register):
"""
Get the value of a register in the debuggee within the context of the self.h_thread.
@type register: Register
@param register: One of EAX, EBX, ECX, EDX, ESI, EDI, ESP, EBP, EIP
@raise PDError: An exception is raised on failure.
@rtype: DWORD
@return: Value of specified register.
"""
self._log('getting {} in thread id {}'.format(register, self.dbg.dwThreadId))
# TODO: Need to refactor this code
register = register.upper()
if register not in ('EAX', 'EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'ESP', 'EBP', 'EIP'):
raise PDError('invalid register specified')
# ensure we have an up to date thread context.
context = self.get_thread_context(self.h_thread)
# TODO: Need to refactor this code
if register == 'EAX':
return context.Eax
elif register == 'EBX':
return context.Ebx
elif register == 'ECX':
return context.Ecx
elif register == 'EDX':
return context.Edx
elif register == 'ESI':
return context.Esi
elif register == 'EDI':
return context.Edi
elif register == 'ESP':
return context.Esp
elif register == 'EBP':
return context.Ebp
elif register == 'EIP':
return context.Eip
# this shouldn't ever really be reached.
return 0
def get_system_dll(self, idx):
"""
Return the system DLL at the specified index.
If the debugger is in client / server mode, remove the PE
structure (we do not want to send that mammoth over the wire).
@type idx: Integer
@param idx: Index into self.system_dlls[] to retrieve DLL from.
@rtype: Mixed
@return: Requested attribute or None if not found.
"""
self._log('get_system_dll()')
try:
dll = self.system_dlls[idx]
except:
# index out of range.
return None
dll.pe = None
return dll
def get_thread_context(self, thread_handle=None, thread_id=0):
"""
Convenience wrapper around GetThreadContext().
Can obtain a thread context via a handle or thread id.
@type thread_handle: HANDLE
@param thread_handle: (Optional) Handle of thread to get context of
@type thread_id: Integer
@param thread_id: (Optional) ID of thread to get context of
@raise PDError: An exception is raised on failure.
@rtype: CONTEXT
@return: Thread CONTEXT on success.
"""
context = CONTEXT()
context.ContextFlags = CONTEXT_FULL | CONTEXT_DEBUG_REGISTERS
# if a thread handle was not specified, get one from the thread id.
if not thread_handle:
h_thread = self.open_thread(thread_id)
else:
h_thread = thread_handle
if not kernel32.GetThreadContext(h_thread, byref(context)):
raise PDError('GetThreadContext()', True)
# if we had to resolve the thread handle, close it.
if not thread_handle:
self.close_handle(h_thread)
return context
def get_unicode_string(self, data):
"""
description
@type data: Raw
@param data: Data to explore for printable unicode string
@rtype: String
@return: False on failure, ascii-converted unicode string on discovered string.
"""
discovered = ''
every_other = True
for char in data:
if every_other:
# if we've hit a non printable char, break
if ord(char) < 32 or ord(char) > 126:
break
discovered += char
every_other = not every_other
if len(discovered) < self.STRING_EXPLORATION_MIN_LENGTH:
return False
return discovered
def hex_dump(self, data, addr=0, prefix=""):
"""
Utility function that converts data into hex dump format.
@type data: Raw Bytes
@param data: Raw bytes to view in hex dump
@type addr: DWORD
@param addr: (Optional, def=0) Address to start hex offset display from
@type prefix: String (Optional, def="")
@param prefix: String to prefix each line of hex dump with.
@rtype: String
@return: Hex dump of data.
"""
dump = prefix
slice_ = ''
for byte in data:
if addr % 16 == 0:
dump += ' '
for char in slice_:
                    if 32 <= ord(char) <= 126:
dump += char
else:
dump += '.'
dump += '\n{}{:04x}: '.format(prefix, addr)
slice_ = ''
dump += '{:02x} '.format(ord(byte))
slice_ += byte
addr += 1
remainder = addr % 16
if remainder != 0:
dump += ' ' * (16 - remainder) + ' '
for char in slice_:
if 32 <= ord(char) <= 126:
dump += char
else:
dump += '.'
return dump + '\n'
def hide_debugger(self):
"""
Hide the presence of the debugger.
This routine requires an active context and therefore can not be called
immediately after a load() for example.
Call it from the first chance breakpoint handler. This routine hides
the debugger in the following ways:
- Modifies the PEB flag that IsDebuggerPresent() checks for.
@raise PDError: An exception is raised if we are unable to hide
the debugger for various reasons.
"""
selector_entry = LDT_ENTRY()
# a current thread context is required.
if not self.context:
raise PDError('hide_debugger(): a thread context is required. Call me from a breakpoint handler.')
if not kernel32.GetThreadSelectorEntry(self.h_thread, self.context.SegFs, byref(selector_entry)):
self.win32_error('GetThreadSelectorEntry()')
fs_base = selector_entry.BaseLow
fs_base += (selector_entry.HighWord.Bits.BaseMid << 16) + (selector_entry.HighWord.Bits.BaseHi << 24)
# http://openrce.org/reference_library/files/reference/Windows Memory Layout, User-Kernel Address Spaces.pdf
# find the peb.
peb = self.read_process_memory(fs_base + 0x30, 4)
peb = self.flip_endian_dword(peb)
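        # the BeingDebugged byte sits at PEB+0x02; IsDebuggerPresent() simply
        # returns that byte, so zeroing it defeats the check.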
# zero out the flag. (3rd byte)
self.write_process_memory(peb+2, '\x00', 1)
return self.ret_self()
def is_address_on_stack(self, address, context=None):
"""
Utility function to determine if the specified address exists
on the current thread stack or not.
@type address: DWORD
@param address: Address to check
@type context: Context
@param context: (Optional) Current thread context to examine
@rtype: Bool
@return: True if address lies in current threads stack range, False otherwise.
"""
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
(stack_top, stack_bottom) = self.stack_range(context)
if stack_bottom <= address <= stack_top:
return True
return False
def iterate_modules(self):
"""
A simple iterator function that can be used to iterate
through all modules the target process has mapped in its
address space. Yielded objects are of type MODULEENTRY32.
@author: Otto Ebeling
@see: enumerate_modules()
@warning: break-ing out of loops over this routine will cause a handle leak.
@rtype: MODULEENTRY32
@return: Iterated module entries.
"""
self._log('iterate_modules()')
current_entry = MODULEENTRY32()
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, self.pid)
if snapshot == INVALID_HANDLE_VALUE:
            raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPMODULE, {})'.format(self.pid), True)
# We *must* set the size of the structure prior to using it,
# otherwise Module32First() will fail.
current_entry.dwSize = sizeof(current_entry)
if not kernel32.Module32First(snapshot, byref(current_entry)):
return
while True:
yield current_entry
if not kernel32.Module32Next(snapshot, byref(current_entry)):
break
# if the above loop is "broken" out of, then this handle leaks.
self.close_handle(snapshot)
def iterate_processes(self):
"""
A simple iterator function that can be used to iterate through all
running processes. Yielded objects are of type PROCESSENTRY32.
@see: enumerate_processes()
@warning: break-ing out of loops over this routine will cause a handle leak.
@rtype: PROCESSENTRY32
@return: Iterated process entries.
"""
self._log('iterate_processes()')
pe = PROCESSENTRY32()
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
if snapshot == INVALID_HANDLE_VALUE:
raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)', True)
# We *must* set the size of the structure prior to using it,
# otherwise Process32First() will fail.
pe.dwSize = sizeof(PROCESSENTRY32)
if not kernel32.Process32First(snapshot, byref(pe)):
return
while True:
yield pe
if not kernel32.Process32Next(snapshot, byref(pe)):
break
# if the above loop is "broken" out of, then this handle leaks.
self.close_handle(snapshot)
def iterate_threads(self):
"""
A simple iterator function that can be used to iterate through all
running processes. Yielded objects are of type THREADENTRY32.
@see: enumerate_threads()
@warning: break-ing out of loops over this routine will cause a handle leak.
@rtype: THREADENTRY32
@return: Iterated process entries.
"""
self._log('iterate_threads()')
thread_entry = THREADENTRY32()
snapshot = kernel32.CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, self.pid)
if snapshot == INVALID_HANDLE_VALUE:
raise PDError('CreateToolhelp32Snapshot(TH32CS_SNAPTHREAD, {})'.format(self.pid), True)
# We *must* set the size of the structure prior to using it,
# otherwise Thread32First() will fail.
thread_entry.dwSize = sizeof(thread_entry)
if not kernel32.Thread32First(snapshot, byref(thread_entry)):
return
while True:
if thread_entry.th32OwnerProcessID == self.pid:
yield thread_entry
if not kernel32.Thread32Next(snapshot, byref(thread_entry)):
break
# if the above loop is "broken" out of, then this handle leaks.
self.close_handle(snapshot)
def flip_endian(self, dword):
"""
        Utility function to flip the endianness of a given DWORD into raw bytes.
        @type dword: DWORD
        @param dword: DWORD whose endianness to flip
@rtype: Raw Bytes
@return: Converted DWORD in raw bytes.
"""
byte1 = chr(dword % 256)
dword = dword >> 8
byte2 = chr(dword % 256)
dword = dword >> 8
byte3 = chr(dword % 256)
dword = dword >> 8
byte4 = chr(dword % 256)
return '%c%c%c%c' % (byte1, byte2, byte3, byte4)
def flip_endian_dword(self, bytes):
"""
        Utility function to flip the endianness of
        a given set of raw bytes into a DWORD.
        @type bytes: Raw Bytes
        @param bytes: Raw bytes whose endianness to flip
@rtype: DWORD
@return: Converted DWORD.
"""
return struct.unpack('<L', bytes)[0]
def load(self, path_to_file, command_line=None, create_new_console=False, show_window=True):
"""
Load the specified executable and optional command line arguments into the debugger.
@todo: This routines needs to be further tested ... I nomally just attach.
@type path_to_file: String
@param path_to_file: Full path to executable to load in debugger
@type command_line: String
@param command_line: (Optional, def=None) Command line arguments to pass to debuggee
@type create_new_console: Boolean
@param create_new_console: (Optional, def=False) Create a new console for the debuggee.
@type show_window: Boolean
@param show_window: (Optional, def=True) Show / hide the debuggee window.
@raise PDError: An exception is raised if we are unable to load the specified executable in the debugger.
"""
pi = PROCESS_INFORMATION()
si = STARTUPINFO()
si.cb = sizeof(si)
# these flags control the main window display of the debuggee.
if not show_window:
si.dwFlags = 0x1
si.wShowWindow = 0x0
# CreateProcess() seems to work better with command line arguments
# when the path_to_file is passed as NULL.
if command_line:
command_line = path_to_file + ' ' + command_line
path_to_file = 0
if self.follow_forks:
creation_flags = DEBUG_PROCESS
else:
creation_flags = DEBUG_ONLY_THIS_PROCESS
if create_new_console:
creation_flags |= CREATE_NEW_CONSOLE
success = kernel32.CreateProcessA(c_char_p(path_to_file),
c_char_p(command_line),
0,
0,
0,
creation_flags,
0,
0,
byref(si),
byref(pi))
if not success:
raise PDError('CreateProcess()', True)
# allow detaching on systems that support it.
try:
self.debug_set_process_kill_on_exit(False)
except:
pass
# store the handles we need.
self.pid = pi.dwProcessId
self.h_process = pi.hProcess
# resolve the PEB address.
selector_entry = LDT_ENTRY()
thread_context = self.get_thread_context(pi.hThread)
if not kernel32.GetThreadSelectorEntry(pi.hThread, thread_context.SegFs, byref(selector_entry)):
self.win32_error('GetThreadSelectorEntry()')
teb = selector_entry.BaseLow
teb += (selector_entry.HighWord.Bits.BaseMid << 16) + (selector_entry.HighWord.Bits.BaseHi << 24)
# add this TEB to the internal dictionary.
self.tebs[pi.dwThreadId] = teb
self.peb = self.read_process_memory(teb + 0x30, 4)
self.peb = struct.unpack('<L', self.peb)[0]
# If the function (CreateProcess) succeeds,
# be sure to call the CloseHandle function to close the hProcess and
# hThread handles when you are finished with them. -bill gates
#
# we keep the process handle open but don't need the thread handle.
self.close_handle(pi.hThread)
def open_process(self, pid):
"""
Convenience wrapper around OpenProcess().
@type pid: Integer
@param pid: Process ID to attach to
@raise PDError: An exception is raised on failure.
"""
self.h_process = kernel32.OpenProcess(PROCESS_ALL_ACCESS, False, pid)
if not self.h_process:
raise PDError('OpenProcess({})'.format(pid), True)
return self.h_process
def open_thread(self, thread_id):
"""
Convenience wrapper around OpenThread().
@type thread_id: Integer
@param thread_id: ID of thread to obtain handle to
@raise PDError: An exception is raised on failure.
"""
h_thread = kernel32.OpenThread(THREAD_ALL_ACCESS, False, thread_id)
if not h_thread:
raise PDError('OpenThread({})'.format(thread_id), True)
return h_thread
def page_guard_clear(self):
"""
Clear all debugger-set PAGE_GUARDs from memory.
This is useful for suspending memory breakpoints to single step
past a REP instruction.
@see: page_guard_restore()
@rtype: PyDBG
@return: Self
"""
self._guards_active = False
for page in self._guarded_pages:
# make a best effort, let's not crash on failure though.
try:
mbi = self.virtual_query(page)
self.virtual_protect(mbi.BaseAddress, 1, mbi.Protect & ~PAGE_GUARD)
except:
pass
return self.ret_self()
def page_guard_restore(self):
"""
Restore all previously cleared debugger-set PAGE_GUARDs from memory.
This is useful for suspending memory
breakpoints to single step past a REP instruction.
@see: page_guard_clear()
@rtype: PyDBG
@return: Self
"""
self._guards_active = True
for page in self._guarded_pages:
# make a best effort, let's not crash on failure though.
try:
mbi = self.virtual_query(page)
self.virtual_protect(mbi.BaseAddress, 1, mbi.Protect | PAGE_GUARD)
except:
pass
return self.ret_self()
def pid_to_port(self, pid):
"""
A helper function that enumerates the IPv4 endpoints for a given process ID.
@author: Justin Seitz
@type pid: Integer
@param pid: Process ID to find port information on.
@raise PDError: An exception is raised on failure
@rtype: A list of tuples
@return: A list of the protocol, bound address and listening port
"""
# local variables to hold all our necessary sweetness.
listening_port = None
bound_address = None
protocol = None
port_list = []
tcp_table = MIB_TCPTABLE_OWNER_PID()
udp_table = MIB_UDPTABLE_OWNER_PID()
init_size = c_int()
# TCP ENDPOINTS
# the first run is to determine the sizing of the struct.
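        # (classic Win32 two-call idiom: the sizing call is expected to come
        #  back with ERROR_INSUFFICIENT_BUFFER while filling in init_size.)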
size_result = iphlpapi.GetExtendedTcpTable(byref(tcp_table),
byref(init_size),
False,
AF_INET,
TCP_TABLE_OWNER_PID_ALL,
0)
if not size_result:
raise PDError("Couldn't retrieve extended TCP information for PID: {}".format(pid), True)
# retrieve the table of TCP_ROW structs, with the correct size this time.
        result = iphlpapi.GetExtendedTcpTable(byref(tcp_table),
                                              byref(init_size),
                                              False,
                                              AF_INET,
                                              TCP_TABLE_OWNER_PID_ALL,
                                              0)
# step through the entries. we only want ports that have
# the listening flag set. snag the port, address and
# protocol tuple and add it to port_list.
for i in range(tcp_table.dwNumEntries):
if tcp_table.table[i].dwOwningPid == pid:
if tcp_table.table[i].dwState == MIB_TCP_STATE_LISTEN:
listening_port = '{}'.format(socket.ntohs(tcp_table.table[i].dwLocalPort))
                    bound_address = socket.inet_ntoa(struct.pack('<L', tcp_table.table[i].dwLocalAddr))
protocol = 'TCP'
port_list.append((protocol, bound_address, listening_port))
# UDP ENDPOINTS
# NOTE: An application can bind a UDP port explicitly to
# send datagrams, this may not be 100% accurate
# so we only take into account those UDP sockets which are bound
# in a manner that allows datagrams on any interface.
init_size = c_int(0)
        size_result = iphlpapi.GetExtendedUdpTable(byref(udp_table),
                                                   byref(init_size),
                                                   False,
                                                   AF_INET,
                                                   UDP_TABLE_OWNER_PID,
                                                   0)
# retrieve the table of UDP_ROW structs.
if not size_result:
raise PDError("Couldn't retrieve extended UDP information for PID: {}".format(pid), True)
result = iphlpapi.GetExtendedUdpTable(byref(udp_table),
byref(init_size),
False,
AF_INET,
UDP_TABLE_OWNER_PID,
0)
for i in range(udp_table.dwNumEntries):
if udp_table.table[i].dwOwningPid == pid:
# if the local addr is 0 then it is a listening socket accepting datagrams.
if udp_table.table[i].dwLocalAddr == 0:
listening_port = '{}'.format(socket.ntohs(udp_table.table[i].dwLocalPort))
                    bound_address = socket.inet_ntoa(struct.pack('<L', udp_table.table[i].dwLocalAddr))
protocol = 'UDP'
port_list.append((protocol, bound_address, listening_port))
return port_list
def process_restore(self):
"""
Restore memory / context snapshot of the debuggee.
All threads must be suspended before calling this routine.
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
# fetch the current list of threads.
current_thread_list = self.enumerate_threads()
# restore the thread context for threads still active.
for thread_context in self.memory_snapshot_contexts:
if thread_context.thread_id in current_thread_list:
self.set_thread_context(thread_context.context, thread_id=thread_context.thread_id)
# restore all saved memory blocks.
for memory_block in self.memory_snapshot_blocks:
try:
self.write_process_memory(
memory_block.mbi.BaseAddress,
memory_block.data,
memory_block.mbi.RegionSize
)
except PDError as err:
self._err('-- IGNORING ERROR --')
self._err('process_restore: ' + err.__str__().rstrip('\r\n'))
pass
return self.ret_self()
def process_snapshot(self, mem_only=False):
"""
Take memory / context snapshot of the debuggee.
All threads must be suspended before calling this routine.
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('taking debuggee snapshot')
do_not_snapshot = [PAGE_READONLY, PAGE_EXECUTE_READ, PAGE_GUARD, PAGE_NOACCESS]
cursor = 0
# reset the internal snapshot data structure lists.
self.memory_snapshot_blocks = []
self.memory_snapshot_contexts = []
if not mem_only:
# enumerate the running threads and save a copy of their contexts.
for thread_id in self.enumerate_threads():
context = self.get_thread_context(None, thread_id)
self.memory_snapshot_contexts.append(MemSnapshotContext(thread_id, context))
self._log('saving thread context of thread id: 0x{:08x}'.format(thread_id))
# Scan through the entire memory range and
# save a copy of suitable memory blocks.
while cursor < 0xFFFFFFFF:
save_block = True
try:
mbi = self.virtual_query(cursor)
except:
break
# Do not snapshot blocks of memory that match the following characteristics.
            # XXX - might want to drop the MEM_IMAGE check to accommodate self-modifying code.
if mbi.State != MEM_COMMIT or mbi.Type == MEM_IMAGE:
save_block = False
for has_protection in do_not_snapshot:
if mbi.Protect & has_protection:
save_block = False
break
if save_block:
                self._log('Adding 0x{:08x} +{} to memory snapshot.'.format(mbi.BaseAddress, mbi.RegionSize))
# read the raw bytes from the memory block.
data = self.read_process_memory(mbi.BaseAddress, mbi.RegionSize)
self.memory_snapshot_blocks.append(MemSnapshotBlock(mbi, data))
cursor += mbi.RegionSize
return self.ret_self()
def read(self, address, length):
"""
Alias to read_process_memory().
@see: read_process_memory
"""
return self.read_process_memory(address, length)
def read_msr(self, address):
"""
Read data from the specified MSR address.
@see: write_msr
@type address: DWORD
@param address: MSR address to read from.
@rtype: tuple
@return: (read status, msr structure)
"""
msr = SYSDBG_MSR()
        msr.Address = address
msr.Data = 0xFF # must initialize this value.
status = ntdll.NtSystemDebugControl(SysDbgReadMsr,
byref(msr),
sizeof(SYSDBG_MSR),
byref(msr),
sizeof(SYSDBG_MSR),
0)
return status, msr
def read_process_memory(self, address, length):
"""
Read from the debuggee process space.
@type address: DWORD
@param address: Address to read from.
@type length: Integer
@param length: Length, in bytes, of data to read.
@raise PDError: An exception is raised on failure.
@rtype: Raw
@return: Read data.
"""
data = ''
read_buf = create_string_buffer(length)
count = c_ulong(0)
orig_length = length
orig_address = address
# ensure we can read from the requested memory space.
_address = address
_length = length
try:
old_protect = self.virtual_protect(_address, _length, PAGE_EXECUTE_READWRITE)
except:
pass
while length:
if not kernel32.ReadProcessMemory(self.h_process, address, read_buf, length, byref(count)):
if not len(data):
raise PDError('ReadProcessMemory(0x{:08x}, {}, read={})'.format(
address, length, count.value), True)
else:
return data
            data += read_buf.raw[:count.value]
length -= count.value
address += count.value
# restore the original page permissions on the target memory region.
try:
self.virtual_protect(_address, _length, old_protect)
except:
pass
return data
def resume_all_threads(self):
"""
Resume all process threads.
@see: suspend_all_threads()
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
for thread_id in self.enumerate_threads():
self.resume_thread(thread_id)
return self.ret_self()
def resume_thread(self, thread_id):
"""
Resume the specified thread.
@type thread_id: DWORD
@param thread_id: ID of thread to resume.
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('resuming thread: 0x{:08x}'.format(thread_id))
thread_handle = self.open_thread(thread_id)
if kernel32.ResumeThread(thread_handle) == -1:
raise PDError('ResumeThread()', True)
self.close_handle(thread_handle)
return self.ret_self()
def ret_self(self):
"""
This convenience routine exists for internal functions to call
and transparently return the correct version of self.
Specifically, an object in normal mode and a moniker
when in client/server mode.
@return: Client / server safe version of self
"""
if self.client_server:
return '**SELF**'
else:
return self
def run(self):
"""
Alias for debug_event_loop().
@see: debug_event_loop()
"""
self.debug_event_loop()
def seh_unwind(self, context=None):
"""
        Unwind the Structured Exception Handler (SEH) chain of
the current or specified thread to the best of our abilities.
The SEH chain is a simple singly linked list,
the head of which is pointed to by fs:0. In cases where the SEH chain
is corrupted and the handler address points to invalid memory,
it will be returned as 0xFFFFFFFF.
@type context: Context
@param context: (Optional) Current thread context to examine
@rtype: List of Tuples
@return: Naturally ordered list of SEH addresses and handlers.
"""
self._log('seh_unwind()')
selector_entry = LDT_ENTRY()
seh_chain = []
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
if not kernel32.GetThreadSelectorEntry(self.h_thread, context.SegFs, byref(selector_entry)):
self.win32_error('GetThreadSelectorEntry()')
fs_base = selector_entry.BaseLow
fs_base += (selector_entry.HighWord.Bits.BaseMid << 16) + (selector_entry.HighWord.Bits.BaseHi << 24)
# determine the head of the current threads SEH chain.
seh_head = self.read_process_memory(fs_base, 4)
seh_head = self.flip_endian_dword(seh_head)
        while seh_head != 0xFFFFFFFF:
            location = seh_head
            try:
                handler = self.read_process_memory(seh_head + 4, 4)
                handler = self.flip_endian_dword(handler)
            except:
                handler = 0xFFFFFFFF
            try:
                seh_head = self.read_process_memory(seh_head, 4)
                seh_head = self.flip_endian_dword(seh_head)
            except:
                seh_head = 0xFFFFFFFF
            # pair each record's own address with its handler
            # (seh_head has already advanced to the next record here).
            seh_chain.append((location, handler))
return seh_chain
def set_attr(self, attribute, value):
"""
        Set the value of the specified class attribute.
        This routine should be used over directly accessing
        class member variables for transparent support across
        local vs. client/server debugger clients.
        @see: get_attr()
@type attribute: String
@param attribute: Name of attribute to return.
@type value: Mixed
@param value: Value to set attribute to.
"""
if hasattr(self, attribute):
setattr(self, attribute, value)
def set_callback(self, exception_code, callback_func):
"""
Set a callback for the specified exception (or debug event) code.
The prototype of the callback routines is::
func (pydbg):
return DBG_CONTINUE # or other continue status
You can register callbacks for any exception code or debug event.
Look in the source for all event_handler_??? and
exception_handler_??? routines to see which ones have
internal processing (internal handlers will still
pass control to your callback).
You can also register a user specified callback that is called
on each loop iteration from within debug_event_loop().
The callback code is USER_CALLBACK_DEBUG_EVENT and the function
prototype is::
func (pydbg)
return DBG_CONTINUE # or other continue status
User callbacks do not / should not access debugger or contextual information.
@type exception_code: Long
@param exception_code: Exception code to establish a callback for
@type callback_func: Function
@param callback_func: Function to call when specified exception code is caught.
"""
self.callbacks[exception_code] = callback_func
def set_debugger_active(self, enable):
"""
Enable or disable the control flag for the main debug event loop.
This is a convenience shortcut over set_attr.
@type enable: Boolean
@param enable: Flag controlling the main debug event loop.
"""
self._log('setting debug event loop flag to {}'.format(enable))
self.debugger_active = enable
def set_register(self, register, value):
"""
Set the value of a register in the debuggee within the context of the self.h_thread.
@type register: Register
@param register: One of EAX, EBX, ECX, EDX, ESI, EDI, ESP, EBP, EIP
@type value: DWORD
@param value: Value to set register to
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('setting {} to 0x{:08x} in thread id {}'.format(register, value, self.dbg.dwThreadId))
register = register.upper()
if register not in ('EAX', 'EBX', 'ECX', 'EDX', 'ESI', 'EDI', 'ESP', 'EBP', 'EIP'):
raise PDError('invalid register specified')
# ensure we have an up to date thread context.
context = self.get_thread_context(self.h_thread)
if register == 'EAX':
context.Eax = value
elif register == 'EBX':
context.Ebx = value
elif register == 'ECX':
context.Ecx = value
elif register == 'EDX':
context.Edx = value
elif register == 'ESI':
context.Esi = value
elif register == 'EDI':
context.Edi = value
elif register == 'ESP':
context.Esp = value
elif register == 'EBP':
context.Ebp = value
elif register == 'EIP':
context.Eip = value
self.set_thread_context(context)
return self.ret_self()
def set_thread_context(self, context, thread_handle=None, thread_id=0):
"""
Convenience wrapper around SetThreadContext().
Can set a thread context via a handle or thread id.
@type thread_handle: HANDLE
@param thread_handle: (Optional) Handle of thread to get context of
@type context: CONTEXT
@param context: Context to apply to specified thread
@type thread_id: Integer
@param thread_id: (Optional, Def=0) ID of thread to get context of
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
# if neither a thread handle or thread id were specified,
# default to the internal one.
if not thread_handle and not thread_id:
h_thread = self.h_thread
# if a thread handle was not specified, get one from the thread id.
elif not thread_handle:
h_thread = self.open_thread(thread_id)
# use the specified thread handle.
else:
h_thread = thread_handle
if not kernel32.SetThreadContext(h_thread, byref(context)):
raise PDError('SetThreadContext()', True)
# if we had to resolve the thread handle, close it.
if not thread_handle and thread_id:
self.close_handle(h_thread)
return self.ret_self()
def sigint_handler(self, signal_number, stack_frame):
"""Interrupt signal handler
We override the default handler to disable the run flag and
exit the main debug event loop.
        @type signal_number: Integer
        @param signal_number: Signal number that triggered the handler (SIGINT)
        @type stack_frame: Frame
        @param stack_frame: Stack frame that was current when the signal arrived
"""
self.set_debugger_active(False)
def single_step(self, enable, thread_handle=None):
"""
Enable or disable single stepping in the specified thread or
self.h_thread if a thread handle is not specified.
@type enable: Bool
@param enable: True to enable single stepping, False to disable
@type thread_handle: Handle
@param thread_handle: (Optional, Def=None) Handle of thread to put into single step mode
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('single_step({})'.format(enable))
if not thread_handle:
thread_handle = self.h_thread
context = self.get_thread_context(thread_handle)
if enable:
# single step already enabled.
if context.EFlags & EFLAGS_TRAP:
return self.ret_self()
context.EFlags |= EFLAGS_TRAP
else:
# single step already disabled:
if not context.EFlags & EFLAGS_TRAP:
return self.ret_self()
            context.EFlags &= ~EFLAGS_TRAP
self.set_thread_context(context, thread_handle=thread_handle)
return self.ret_self()
def smart_dereference(self, address, print_dots=True, hex_dump=False):
"""
"Intelligently" discover data behind an address.
The address is dereferenced and explored in search of an ASCII
or Unicode string.
In the absense of a string the printable characters are returned with
non-printables represented as dots (.).
The location of the discovered data is returned as well as
either "heap", "stack" or the name of the module it lies in
(global data).
@type address: DWORD
@param address: Address to smart dereference
@type print_dots: Bool
@param print_dots: (Optional, def:True) Controls suppression of dot in place of non-printable
@type hex_dump: Bool
@param hex_dump: (Optional, def=False) Return a hex dump in the absense of string detection
@rtype: String
@return: String of data discovered behind dereference.
"""
try:
mbi = self.virtual_query(address)
except:
return 'N/A'
# if the address doesn't point into writable memory (stack or heap),
# then bail.
if not mbi.Protect & PAGE_READWRITE:
return 'N/A'
# if the address does point to writeable memory,
# ensure it doesn't sit on the PEB or any of the TEBs.
if mbi.BaseAddress == self.peb or mbi.BaseAddress in self.tebs.values():
return 'N/A'
try:
explored = self.read_process_memory(address, self.STRING_EXPLORATON_BUF_SIZE)
except:
return 'N/A'
# determine if the write-able address sits in the stack range.
if self.is_address_on_stack(address):
location = 'stack'
# otherwise it could be in a module's global section or on the heap.
else:
module = self.addr_to_module(address)
if module:
location = '{}.data'.format(module.szModule)
# if the write-able address is not on the stack or
# in a module range, then we assume it's on the heap.
# we *could* walk the heap structures to determine for sure,
# but it's a slow method and this process of
# elimination works well enough.
else:
location = 'heap'
explored_string = self.get_ascii_string(explored)
if not explored_string:
explored_string = self.get_unicode_string(explored)
if not explored_string and hex_dump:
explored_string = self.hex_dump(explored)
if not explored_string:
explored_string = self.get_printable_string(explored, print_dots)
if hex_dump:
return '{} --> {}'.format(explored_string, location)
else:
return '{} ({})'.format(explored_string, location)
def stack_range(self, context=None):
"""
Determine the stack range (top and bottom) of the current or specified thread.
The desired information is located at offsets 4 and 8 from
the Thread Environment Block (TEB), which in turn is pointed to by fs:0.
@type context: Context
@param context: (Optional) Current thread context to examine
@rtype: Mixed
        @return: Tuple containing (stack_top, stack_bottom) on success, False otherwise.
"""
selector_entry = LDT_ENTRY()
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
if not kernel32.GetThreadSelectorEntry(self.h_thread, context.SegFs, byref(selector_entry)):
self.win32_error('GetThreadSelectorEntry()')
fs_base = selector_entry.BaseLow
fs_base += (selector_entry.HighWord.Bits.BaseMid << 16) + (selector_entry.HighWord.Bits.BaseHi << 24)
# determine the top and bottom of the debuggee's stack.
stack_top = self.read_process_memory(fs_base + 4, 4)
stack_bottom = self.read_process_memory(fs_base + 8, 4)
stack_top = self.flip_endian_dword(stack_top)
stack_bottom = self.flip_endian_dword(stack_bottom)
return (stack_top, stack_bottom)
def stack_unwind(self, context=None):
"""
Unwind the stack to the best of our ability.
This function is really only useful if called when EBP is actually
used as a frame pointer. If it is otherwise being used as
a general purpose register then stack unwinding will
fail immediately.
@type context: Context
@param context: (Optional) Current thread context to examine
@rtype: List
@return: The current call stack ordered from most recent call backwards.
"""
self._log('stack_unwind()')
selector_entry = LDT_ENTRY()
call_stack = []
# if the optional current thread context was not supplied,
# grab it for the current thread.
if not context:
context = self.context
# determine the stack top / bottom.
(stack_top, stack_bottom) = self.stack_range(context)
this_frame = context.Ebp
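        # standard frame layout: [ebp] holds the caller's saved EBP and
        # [ebp+4] holds the return address back into the caller.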
while this_frame > stack_bottom and this_frame < stack_top:
# stack frame sanity check: must be DWORD boundary aligned.
if this_frame & 3:
break
try:
ret_addr = self.read_process_memory(this_frame + 4, 4)
ret_addr = self.flip_endian_dword(ret_addr)
except:
break
# return address sanity check:
# return address must live on an executable page.
try:
mbi = self.virtual_query(ret_addr)
except:
break
if mbi.Protect not in (PAGE_EXECUTE, PAGE_EXECUTE_READ, PAGE_EXECUTE_READWRITE, PAGE_EXECUTE_WRITECOPY):
break
# add the return address to the call stack.
call_stack.append(ret_addr)
# follow the frame pointer to the next frame.
try:
next_frame = self.read_process_memory(this_frame, 4)
next_frame = self.flip_endian_dword(next_frame)
except:
break
# stack frame sanity check:
            # the new frame must be at a higher address than the previous frame.
if next_frame <= this_frame:
break
this_frame = next_frame
return call_stack
def suspend_all_threads(self):
"""
Suspend all process threads.
@see: resume_all_threads()
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
for thread_id in self.enumerate_threads():
self.suspend_thread(thread_id)
return self.ret_self()
def suspend_thread(self, thread_id):
"""
Suspend the specified thread.
@type thread_id: DWORD
@param thread_id: ID of thread to suspend
@raise PDError: An exception is raised on failure.
@rtype: PyDBG
@return: Self
"""
self._log('suspending thread: 0x{:08x}'.format(thread_id))
thread_handle = self.open_thread(thread_id)
if kernel32.SuspendThread(thread_handle) == -1:
raise PDError('SuspendThread()', True)
self.close_handle(thread_handle)
return self.ret_self()
def terminate_process(self, exit_code=0, method='terminateprocess'):
"""
Terminate the debuggee using the specified method.
"terminateprocess": Terminate the debuggee by calling TerminateProcess(debuggee_handle).
"exitprocess": Terminate the debuggee by setting its current EIP to ExitProcess().
@type exit_code: Integer
@param exit_code: (Optional, def=0) Exit code
@type method: String
        @param method: (Optional, def="terminateprocess") Termination method. See __doc__ for more info.
@raise PDError: An exception is raised on failure.
"""
if method.lower().startswith('exitprocess'):
self.context.Eip = self.func_resolve_debuggee('kernel32', 'ExitProcess')
self.set_thread_context(self.context)
# fall back to "terminateprocess".
else:
if not kernel32.TerminateProcess(self.h_process, exit_code):
raise PDError('TerminateProcess({})'.format(exit_code), True)
def to_binary(self, number, bit_count=32):
"""
Convert a number into a binary string.
This is an ugly one liner that I ripped off of some site.
@see: to_decimal()
@type number: Integer
@param number: Number to convert to binary string.
@type bit_count: Integer
@param bit_count: (Optional, Def=32) Number of bits to include in output string.
@rtype: String
@return: Specified integer as a binary string
"""
return ''.join(map(lambda x: str((number >> x) & 1), range(bit_count - 1, -1, -1)))
def to_decimal(self, binary):
"""
Convert a binary string into a decimal number.
@see: to_binary()
@type binary: String
@param binary: Binary string to convert to decimal
@rtype: Integer
@return: Specified binary string as an integer
"""
# this is an ugly one liner that I ripped off of some site.
#return sum(map(lambda x: int(binary[x]) and 2**(len(binary) - x - 1), range(len(binary)-1, -1, -1)))
# this is much cleaner (thanks cody)
return int(binary, 2)
def virtual_alloc(self, address, size, alloc_type, protection):
"""
Convenience wrapper around VirtualAllocEx()
@type address: DWORD
@param address: Desired starting address of region to allocate, can be None
@type size: Integer
@param size: Size of memory region to allocate, in bytes
@type alloc_type: DWORD
@param alloc_type: The type of memory allocation (most often MEM_COMMIT)
@type protection: DWORD
@param protection: Memory protection to apply to the specified region
@raise PDError: An exception is raised on failure.
@rtype: DWORD
@return: Base address of the allocated region of pages.
"""
if address:
self._log('VirtualAllocEx(0x{:08x}, {}, 0x{:08x}, 0x{:08x})'.format(
address, size, alloc_type, protection))
else:
self._log('VirtualAllocEx(NULL, {}, 0x{:08x}, 0x{:08x})'.format(size, alloc_type, protection))
allocated_address = kernel32.VirtualAllocEx(self.h_process, address, size, alloc_type, protection)
if not allocated_address:
raise PDError(
'VirtualAllocEx(0x{:08x}, {}, 0x{:08x}, 0x{:08x})'.format(
address, size, alloc_type, protection), True)
return allocated_address
def virtual_free(self, address, size, free_type):
"""
Convenience wrapper around VirtualFreeEx()
@type address: DWORD
@param address: Pointer to the starting address of the region of memory to be freed
@type size: Integer
@param size: Size of memory region to free, in bytes
@type free_type: DWORD
@param free_type: The type of free operation
@raise PDError: An exception is raised on failure.
"""
self._log('VirtualFreeEx(0x{:08x}, {}, 0x{:08x})'.format(address, size, free_type))
if not kernel32.VirtualFreeEx(self.h_process, address, size, free_type):
raise PDError('VirtualFreeEx(0x{:08x}, {}, 0x{:08x})'.format(address, size, free_type), True)
def virtual_protect(self, base_address, size, protection):
"""
Convenience wrapper around VirtualProtectEx()
@type base_address: DWORD
@param base_address: Base address of region of pages whose access protection attributes are to be changed
@type size: Integer
@param size: Size of the region whose access protection attributes are to be changed
@type protection: DWORD
@param protection: Memory protection to apply to the specified region
@raise PDError: An exception is raised on failure.
@rtype: DWORD
@return: Previous access protection.
"""
# self._log("VirtualProtectEx( , 0x%08x, %d, %08x, ,)" % (base_address, size, protection))
old_protect = c_ulong(0)
success = kernel32.VirtualProtectEx(self.h_process, base_address,
size, protection,
byref(old_protect))
if not success:
raise PDError('VirtualProtectEx(0x{:08x}, {}, 0x{:08x})'.format(
base_address, size, protection), True)
return old_protect.value
def virtual_query(self, address):
"""
Convenience wrapper around VirtualQueryEx().
@type address: DWORD
@param address: Address to query
@raise PDError: An exception is raised on failure.
@rtype: MEMORY_BASIC_INFORMATION
@return: MEMORY_BASIC_INFORMATION
"""
mbi = MEMORY_BASIC_INFORMATION()
if kernel32.VirtualQueryEx(self.h_process, address, byref(mbi), sizeof(mbi)) < sizeof(mbi):
raise PDError('VirtualQueryEx(0x{:08x})'.format(address), True)
return mbi
def win32_error(self, prefix=None):
"""
Convenience wrapper around GetLastError() and FormatMessage().
Raises an exception with the relevant error code and formatted message.
@type prefix: String
@param prefix: (Optional) String to prefix error message with.
@raise PDError: An exception is always raised by this routine.
"""
error = c_char_p()
error_code = kernel32.GetLastError()
kernel32.FormatMessageA(
FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM,
None,
error_code,
0x00000400, # MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT)
byref(error),
0,
None)
if prefix:
error_message = '{}: {}'.format(prefix, error.value)
else:
error_message = 'GetLastError(): {}'.format(error.value)
raise PDError(error_message, error_code)
def write(self, address, data, length=0):
"""
Alias to write_process_memory().
@see: write_process_memory
"""
return self.write_process_memory(address, data, length)
def write_msr(self, address, data):
"""
Write data to the specified MSR address.
@see: read_msr
@type address: DWORD
@param address: MSR address to write to.
@type data: QWORD
@param data: Data to write to MSR address.
@rtype: tuple
@return: (read status, msr structure)
"""
msr = SYSDBG_MSR()
msr.Address = address
msr.Data = data
status = ntdll.NtSystemDebugControl(SysDbgWriteMsr,
byref(msr),
sizeof(SYSDBG_MSR),
0,
0,
0)
return status
def write_process_memory(self, address, data, length=0):
"""
Write to the debuggee process space.
Convenience wrapper around WriteProcessMemory(). This routine will
continuously attempt to write the data requested until it is complete.
@type address: DWORD
@param address: Address to write to
@type data: Raw Bytes
@param data: Data to write
@type length: DWORD
@param length: (Optional, Def:len(data)) Length of data, in bytes, to write
@raise PDError: An exception is raised on failure.
"""
count = c_ulong(0)
# if the optional data length parameter was omitted,
# calculate the length ourselves.
if not length:
length = len(data)
# ensure we can write to the requested memory space.
_address = address
_length = length
try:
old_protect = self.virtual_protect(_address, _length, PAGE_EXECUTE_READWRITE)
except:
pass
        while length:
            c_data = c_char_p(data)
            if not kernel32.WriteProcessMemory(self.h_process, address, c_data, length, byref(count)):
                raise PDError('WriteProcessMemory(0x{:08x}, ..., {})'.format(address, length), True)
            # advance past the bytes that were just written before continuing.
            data = data[count.value:]
            length -= count.value
            address += count.value
# restore the original page permissions on the target memory region.
try:
self.virtual_protect(_address, _length, old_protect)
except:
pass
| [
"[email protected]"
] | |
5b93b4356e42e2778ef25836eb0d5835a1ef480c | 25f9afe371c59612e02e561a6b35b8d8bafad20a | /tests/suite/test_smoke.py | c67d4d9576cb7e6ed4820df54347905d68d8793c | [
"Apache-2.0"
] | permissive | HubBucket-Team/kubernetes-ingress | 2b8bf4ac293fb2e9aa51e18037c34029ed66a8be | 99663386df7cea013489a88b9471eb18be4c9e77 | refs/heads/master | 2020-07-07T23:18:34.157361 | 2019-08-13T13:13:39 | 2019-08-20T15:23:24 | 203,502,783 | 1 | 1 | Apache-2.0 | 2019-08-21T03:58:05 | 2019-08-21T03:58:04 | null | UTF-8 | Python | false | false | 2,734 | py | import requests
import pytest
from suite.fixtures import PublicEndpoint
from suite.resources_utils import create_secret_from_yaml, delete_secret, \
ensure_connection_to_public_endpoint, create_items_from_yaml, \
delete_items_from_yaml, create_example_app, delete_common_app, \
wait_until_all_pods_are_ready, ensure_response_from_backend
from suite.yaml_utils import get_first_ingress_host_from_yaml
from settings import TEST_DATA
paths = ["backend1", "backend2"]
class SmokeSetup:
"""
Encapsulate the Smoke Example details.
Attributes:
public_endpoint (PublicEndpoint):
ingress_host (str):
"""
def __init__(self, public_endpoint: PublicEndpoint, ingress_host):
self.public_endpoint = public_endpoint
self.ingress_host = ingress_host
@pytest.fixture(scope="class", params=["standard", "mergeable"])
def smoke_setup(request, kube_apis, ingress_controller_endpoint, ingress_controller, test_namespace) -> SmokeSetup:
print("------------------------- Deploy Smoke Example -----------------------------------")
secret_name = create_secret_from_yaml(kube_apis.v1, test_namespace, f"{TEST_DATA}/smoke/smoke-secret.yaml")
create_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml", test_namespace)
ingress_host = get_first_ingress_host_from_yaml(f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml")
create_example_app(kube_apis, "simple", test_namespace)
wait_until_all_pods_are_ready(kube_apis.v1, test_namespace)
ensure_connection_to_public_endpoint(ingress_controller_endpoint.public_ip,
ingress_controller_endpoint.port,
ingress_controller_endpoint.port_ssl)
def fin():
print("Clean up the Smoke Application:")
delete_common_app(kube_apis, "simple", test_namespace)
delete_items_from_yaml(kube_apis, f"{TEST_DATA}/smoke/{request.param}/smoke-ingress.yaml",
test_namespace)
delete_secret(kube_apis.v1, secret_name, test_namespace)
request.addfinalizer(fin)
return SmokeSetup(ingress_controller_endpoint, ingress_host)
@pytest.mark.smoke
class TestSmoke:
@pytest.mark.parametrize("path", paths)
def test_response_code_200_and_server_name(self, smoke_setup, path):
req_url = f"https://{smoke_setup.public_endpoint.public_ip}:{smoke_setup.public_endpoint.port_ssl}/{path}"
ensure_response_from_backend(req_url, smoke_setup.ingress_host)
resp = requests.get(req_url, headers={"host": smoke_setup.ingress_host}, verify=False)
assert resp.status_code == 200
assert f"Server name: {path}" in resp.text
| [
"[email protected]"
] | |
6ea349337b8084df78a521f35798fd7e7555a5c5 | c5514643dd1601661abce5449674cc796247b66a | /src/event_configuration.py | a370720faf27ec54a7e64737991585f2e0fd77c9 | [] | no_license | happeninghq/happening-comments | 0e456d593608cc1ff63e3df2109e039b8d80f921 | d330c3fcfb648e02be9466640570764f623945f0 | refs/heads/master | 2021-01-20T19:06:42.744164 | 2017-04-05T13:36:25 | 2017-04-05T13:36:25 | 59,932,570 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 342 | py | """Event Configuration."""
from happening import configuration
from happening import plugins
class CommentOnGroups(configuration.BooleanField):
"""Can members comment on groups."""
@property
def settable(self):
"""Only enable if the groups plugin is enabled."""
return plugins.plugin_enabled("plugins.groups")
| [
"[email protected]"
] | |
3424b73d226dbd1d83678cf172706f34105222d2 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/frogjump_20200717123008.py | e4f45f8cc2fdaa932a6cbf2ca3a79c65a2b5a589 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 399 | py | # use the brute force approach then optimize
# and test edge cases
# O(1) with ceiling division (the brute-force loop below is O(n))
import math


def jump(X, Y, D):
    # already at (or past) the target -- no jumps needed
    if X >= Y:
        return 0
    # round up: any partial remaining distance still costs one full jump
    return int(math.ceil((Y - X) / D))
    # brute-force equivalent:
    # count = 0
    # while X < Y:
    #     X = X + D
    #     count += 1
    # return count
print(jump(10,85,30)) | [
"[email protected]"
] | |
bac232b847d7bdec1b84d7b26513593e311f39e1 | a46d135ba8fd7bd40f0b7d7a96c72be446025719 | /packages/python/plotly/plotly/validators/choropleth/colorbar/_outlinewidth.py | f67ddb9933f89083f1ad283ae70d3c68d77de79f | [
"MIT"
] | permissive | hugovk/plotly.py | 5e763fe96f225d964c4fcd1dea79dbefa50b4692 | cfad7862594b35965c0e000813bd7805e8494a5b | refs/heads/master | 2022-05-10T12:17:38.797994 | 2021-12-21T03:49:19 | 2021-12-21T03:49:19 | 234,146,634 | 0 | 0 | MIT | 2020-01-15T18:33:43 | 2020-01-15T18:33:41 | null | UTF-8 | Python | false | false | 480 | py | import _plotly_utils.basevalidators
class OutlinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(
self, plotly_name="outlinewidth", parent_name="choropleth.colorbar", **kwargs
):
super(OutlinewidthValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
**kwargs
)
| [
"[email protected]"
] | |
1677c70b6145d13b113dac0029cc065ee6ccb880 | 409058c5b829e764d0e31566e8afaaa6baac61ca | /examples/15_muti_robot/multi_drone/01_scan_ip.py | 273b4e322e17bbfdd084712c5758607931858983 | [] | no_license | iced07/robomaster | 20528d84c7a9f8a4fd54b0bf0016b10097d41b5d | d60f79cba4531ba1c216f4d33211d76772a5bfe9 | refs/heads/master | 2022-12-24T00:52:04.986682 | 2020-09-18T14:17:21 | 2020-09-18T14:18:03 | 298,323,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 849 | py | # -*-coding:utf-8-*-
# Copyright (c) 2020 DJI, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multi_robomaster import multi_robot
if __name__ == '__main__':
multi_drone = multi_robot.MultiDrone()
drone_ip_list = multi_drone._scan_multi_robot(2)
print("scan ip result: {0}".format(drone_ip_list))
| [
"[email protected]"
] | |
2a5c757d67b5a4fcd73e07832a8a1b762878d752 | be317396416134fc18d598934403906b9b1a7583 | /word_data_gen.py | b738412179d82da71185cc5407c91359cbaad683 | [
"Apache-2.0"
] | permissive | Guhaifudeng/zhihukankan | 26d5c40638035c9a13b0e24b789afd11c6eb157f | ccb216458a74d85bf048b0da11146716026b7ce3 | refs/heads/master | 2020-06-19T07:36:54.996472 | 2017-07-08T09:09:28 | 2017-07-08T09:09:28 | 94,181,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,447 | py | #encoding:utf-8
import codecs
import word_util
def gen_word_key(word_embedding_file,word_key_file,has_head = False):
w_write = codecs.open(word_key_file,'w', 'utf-8')
with codecs.open(word_embedding_file, 'r', 'utf-8') as w_read:
count = 0
w_key = ''
while True:
line = w_read.readline()
if not line:
break
if has_head:
has_head = False
continue
count += 1
if count % 1000 == 0:
print('load word count %d' % count)
w_key += '\t' + line.strip().split(" ")[0]
#print(w_key)
w_write.write(w_key[1:]+'\n')
print("count of words in word_embedding_file %d" % count)
print("finished !")
def gen_word_key_after_removed(word_idf_map, word_key_after_removed_file):
with codecs.open(word_key_after_removed_file,'w','utf-8') as w_write:
rm_list = []
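        # an idf of (numerically) zero means the word appears in every document
        # and carries no discriminative weight, so mark it for removal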
for (key, idf) in word_idf_map.items():
if float(idf) < 1e-6:
rm_list.append(key)
for key in rm_list:
word_idf_map.pop(key)
        word_key = word_idf_map.keys()
        w_write.write('\t'.join(word_key) + '\n')
def gen_word_tfidf_after_removed(word_keys_tfidf_after_removed_file, word_tfidf_map, word_keys):
    with codecs.open(word_keys_tfidf_after_removed_file, 'w', 'utf-8') as w_tfidf_write:
        kept_keys = []
        kept_tfidf = []
        for (key, tfidf) in word_tfidf_map.items():
            # keep only the words that survived removal (passed in as word_keys)
            if key in word_keys:
                kept_keys.append(key)
                kept_tfidf.append(tfidf)
        w_tfidf_write.write('\t'.join(kept_keys) + '\n')
        w_tfidf_write.write('\t'.join(kept_tfidf) + '\n')
if __name__ == '__main__':
#gen_word_key('../data/word_embedding.txt','../out/word_keys.txt',True)
word_key_tfidf_after_removed_file = '../out/partition_tfidf.txt'
# word_idf_map = word_util.build_word_idf_hashmap('../out/global_idf.txt')
# gen_word_key_after_removed(word_idf_map, word_key_after_removed_file)
word_tfidf_map = word_util.build_word_tfidf_hashmap('../out/global_tfidf.txt')
word_keys = word_util.build_word_keys_hashmap('../out/word_keys_rmd.txt')
gen_word_tfidf_after_removed(word_key_tfidf_after_removed_file,word_tfidf_map,word_keys)
print('finished')
| [
"="
] | = |
d4483efe86d7062fd477ca674becd6f9d965816e | fd8d33572656edf9e1133a72ad4e2fa090f90a5f | /packages/OpenWeatherMap/nodes/OpenWeatherMap___BreakTemp0/OpenWeatherMap___BreakTemp0___METACODE.py | 389970e2b1ffbdae5d0a55956bfa72961c159947 | [
"MIT"
] | permissive | ChristianHohlfeld/Ryven | a01c2eafa79a80883a9490efb5f043fd35f53484 | 53bf7e57a7b0fa25a704cd0d2214a7f76096d4dd | refs/heads/master | 2022-12-12T22:03:57.122034 | 2020-08-31T13:45:45 | 2020-08-31T13:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,445 | py | from NIENV import *
# API METHODS
# self.main_widget <- access to main widget
# Ports
# self.input(index) <- access to input data
# self.set_output_val(self, index, val) <- set output data port value
# self.exec_output(index) <- executes an execution output
# self.create_new_input(type_, label, widget_name=None, widget_pos='under', pos=-1)
# self.delete_input(index or input)
# self.create_new_output(type_, label, pos=-1)
# self.delete_output(index or output)
# Logging
# mylog = self.new_log('Example Log')
# mylog.log('I\'m alive!!')
# self.log_message('hello global!', 'global')
# self.log_message('that\'s not good', 'error')
# ------------------------------------------------------------------------------
from pyowm.utils.measurables import kelvin_to_celsius, kelvin_to_fahrenheit
class %NODE_TITLE%_NodeInstance(NodeInstance):
def __init__(self, parent_node: Node, flow, configuration=None):
super(%NODE_TITLE%_NodeInstance, self).__init__(parent_node, flow, configuration)
# self.special_actions['action name'] = {'method': M(self.action_method)}
# ...
self.initialized()
# don't call self.update_event() directly, use self.update() instead
def update_event(self, input_called=-1):
temp_dict = self.input(0)
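        # pyowm reports temperatures in Kelvin; convert each entry to the unit
        # given on input 1, skipping None values the dict may contain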
if self.input(1) != 'kelvin':
for key in list(temp_dict.keys()):
item = temp_dict[key]
if item is not None:
if self.input(1) == 'celsius':
temp_dict[key] = kelvin_to_celsius(item)
elif self.input(1) == 'fahrenheit':
temp_dict[key] = kelvin_to_fahrenheit(item)
# temp_dict = kelvin_dict_to(temp_dict, self.input(1)) doesn't work with NoneType values -.- which happen to persist
temp = temp_dict['temp']
temp_kf = temp_dict['temp_kf']
temp_max = temp_dict['temp_max']
temp_min = temp_dict['temp_min']
feels_like = temp_dict['feels_like']
self.set_output_val(0, temp)
self.set_output_val(1, temp_kf)
self.set_output_val(2, temp_min)
self.set_output_val(3, temp_max)
self.set_output_val(4, feels_like)
def get_data(self):
data = {}
# ...
return data
def set_data(self, data):
pass # ...
def remove_event(self):
pass
| [
"[email protected]"
] | |
b093ca38f2f191bd61045b53e218e509e9ee9255 | 372eefa7d896d3cee8c1c1befd8d3baec4eb0188 | /infra/services/cicd/artifacts.py | 73cce818ce93475357036f6865901e922b7e813b | [] | no_license | dr-natetorious/aws-homenet | 5c17f4c3e1fcd60f50d22b5b94453f1d965d4ca0 | d5382c7ada2c9bd5dc0b3687d57d47282791ed40 | refs/heads/master | 2023-06-21T17:40:08.721233 | 2021-07-22T18:08:50 | 2021-07-22T18:08:50 | 307,004,525 | 1 | 0 | null | 2021-06-27T18:05:01 | 2020-10-25T01:45:42 | Python | UTF-8 | Python | false | false | 1,131 | py | from infra.interfaces import ILandingZone
from aws_cdk import (
core,
aws_codeartifact as art,
aws_route53 as r53,
)
class ArtifactsConstruct(core.Construct):
"""
Represents a code artifact repository.
"""
def __init__(self, scope: core.Construct, id: str, landing_zone:ILandingZone,zone:r53.IHostedZone, **kwargs) -> None:
super().__init__(scope, id, **kwargs)
core.Tags.of(self).add('Construct',ArtifactsConstruct.__name__)
self.domain = art.CfnDomain(self,'Domain',
domain_name=landing_zone.zone_name)
        # a separate attribute keeps the PyPI proxy addressable alongside the default repo
        self.pypi_repo = art.CfnRepository(self,'PyPi',
domain_name=self.domain.domain_name,
repository_name='pypi-store',
description='PyPI connector',
external_connections=['public:pypi'])
self.repo = art.CfnRepository(self,'DefaultRepo',
repository_name=landing_zone.zone_name,
domain_name= self.domain.domain_name,
#upstreams=['pypi-store'],
description='Artifacts for '+zone.zone_name)
r53.CnameRecord(self,'DnsRecord',
zone=zone,
record_name='artifacts.{}'.format(zone.zone_name),
domain_name=self.domain.domain_name)
| [
"nate@bachmeier"
] | nate@bachmeier |
02ce7282c2e5418148795d72cfafea3df7b36c38 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_129/ch82_2019_06_03_22_51_02_844625.py | 26ab896be3fc59c6151a93badfc0832f10734bda | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def primeiras_ocorrencias(palavra):
dic = {}
for e in palavra:
if e not in dic:
dic[e] = 1
else:
            dic[e] += 1
return dic
| [
"[email protected]"
] | |
1fa3c7c2af1235677a3196c7fa0ba42253cf7339 | e82b761f53d6a3ae023ee65a219eea38e66946a0 | /All_In_One/addons/Renamer.py | 97190580daf2e8848a7d452f77642f9dba9bf729 | [] | no_license | 2434325680/Learnbgame | f3a050c28df588cbb3b14e1067a58221252e2e40 | 7b796d30dfd22b7706a93e4419ed913d18d29a44 | refs/heads/master | 2023-08-22T23:59:55.711050 | 2021-10-17T07:26:07 | 2021-10-17T07:26:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,910 | py | ######################################################################################################
# A tool to easely renamer objects, materials,... #
#                          A tool to easily rename objects, materials, ...                          #
#         Actually partly uncommented - if you do not understand some parts of the code,            #
# Author: Lapineige #
# License: GPL v3 #
######################################################################################################
############# Add-on description (used by Blender)
bl_info = {
"name": "Renamer",
"description": '',
"author": "Lapineige",
"version": (1, 0),
"blender": (2, 71, 0),
"location": "3D View > Toolself > Rename (tab)",
"warning": "",
"wiki_url": "",
"tracker_url": "http://le-terrier-de-lapineige.over-blog.com/contact",
"category": "Learnbgame",
}
import bpy
bpy.types.Scene.source_name = bpy.props.StringProperty()
bpy.types.Scene.new_name = bpy.props.StringProperty()
bpy.types.Scene.rename_mode = bpy.props.EnumProperty(items =[('objects','Object',"",1),('materials','Material',"",2),('textures','Texture',"",3),('meshes','Mesh',"",4),('lamps','Lamp',"",5),('scenes','Scene',"",6),('worlds','World',"",7)])
bpy.types.Scene.only_selection= bpy.props.BoolProperty(default=False)
class Rename(bpy.types.Operator):
""" """
bl_idname = "scene.rename"
bl_label = "Rename"
@classmethod
def poll(cls, context):
return True
def execute(self, context):
source_name = context.scene.source_name
new_name = context.scene.new_name
if context.scene.rename_mode == 'objects' and context.scene.only_selection:
to_rename_list = bpy.data.objects
for foo in to_rename_list:
if source_name in foo.name and foo.select:
foo.name = foo.name[:foo.name.index(source_name)] + new_name + foo.name[foo.name.index(source_name)+len(source_name):]
else:
            # getattr() picks the right bpy.data collection directly, without
            # building the rename loop as a string for exec()
            to_rename_list = getattr(bpy.data, context.scene.rename_mode)
            for foo in to_rename_list:
                if source_name in foo.name:
                    foo.name = foo.name[:foo.name.index(source_name)] + new_name + foo.name[foo.name.index(source_name) + len(source_name):]
return {'FINISHED'}
class SwitchName(bpy.types.Operator):
""" """
bl_idname = "scene.switch_name"
bl_label = "Switch source/new name"
@classmethod
def poll(cls, context):
return True
def execute(self, context):
context.scene.new_name , context.scene.source_name = context.scene.source_name , context.scene.new_name
return {'FINISHED'}
class RenamePanel(bpy.types.Panel):
""" """
bl_label = "Rename"
bl_space_type = 'VIEW_3D'
bl_region_type = 'TOOLS'
bl_category = "Rename"
def draw(self, context):
layout = self.layout
layout.prop(context.scene, "rename_mode", text="Mode")
if context.scene.rename_mode == 'objects':
layout.prop(context.scene, 'only_selection', text='Only Selected')
layout.prop(context.scene, "source_name")
layout.prop(context.scene, "new_name")
row = layout.row(align=True)
row.operator("scene.rename", icon="FILE_TEXT")
row.operator("scene.switch_name", text='', icon="ARROW_LEFTRIGHT")
def register():
bpy.utils.register_class(RenamePanel)
bpy.utils.register_class(Rename)
bpy.utils.register_class(SwitchName)
def unregister():
bpy.utils.unregister_class(RenamePanel)
bpy.utils.unregister_class(Rename)
bpy.utils.unregister_class(SwitchName)
if __name__ == "__main__":
register()
| [
"[email protected]"
] | |
1f80f5d806fd487bb6af162e32e82e2567da9491 | 08427cf6764cb646fdd37eb239dc6dde0be68ad7 | /python/leetcode.153.py | 8f2d4f9af6fd521c1079ee04cb577821d613a829 | [] | no_license | CalvinNeo/LeetCode | 9d8fa71a1da8c926b5f39659a1befcfa06608945 | 02ebe56cd92b9f4baeee132c5077892590018650 | refs/heads/master | 2020-12-31T00:41:14.031066 | 2020-11-06T04:41:59 | 2020-11-06T04:41:59 | 80,634,710 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | def se(arr, fr, to):
if to == fr:
return arr[fr]
elif to - fr == 1:
return min(arr[to], arr[fr])
mid = (fr + to) / 2
if arr[fr] > arr[mid]:
return se(arr, fr, mid)
elif arr[mid + 1] > arr[to]:
return se(arr, mid + 1, to)
else:
return arr[mid + 1]
class Solution(object):
def findMin(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
flag = True
for i in xrange(1, len(nums)):
if nums[i] < nums[i - 1]:
flag = False
break
if not flag:
return se(nums, 0, len(nums) - 1)
else:
return nums[0]
sln = Solution()
print sln.findMin([1,2,3,4,5,6])
print sln.findMin([4,5,6,1,2,3])
print sln.findMin([2,3,1])
print sln.findMin([3,1,2])
print sln.findMin([2,1])
print sln.findMin([3,4,1,2])
print sln.findMin([2,3,4,1]) | [
"[email protected]"
] | |
b784b3f3d2161fdf8531127e08a4cb750cc38d02 | 80fe5bd6413fb6366efba5f7a5d75edd7bca5295 | /snake_game/scoreboard.py | 1807cafea2c95dceb667bdecb07d2cb21b7d63e2 | [] | no_license | toastding/collections | 37893167ca178a289b6d88b585cc056488726691 | 9c70d3ecaec211fa68d8de598af59f53f7dcbc1e | refs/heads/master | 2023-07-14T08:01:37.851618 | 2021-08-31T13:42:43 | 2021-08-31T13:42:43 | 360,877,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,028 | py | from turtle import Turtle
ALIGNMENT = "center"
FONT = ("Verdana", 20, "normal")
class Scoreboard(Turtle):
def __init__(self):
super().__init__()
self.score = 0
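        # the high score persists across runs in data.txt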
with open("data.txt") as data:
self.high_score = int(data.read())
self.penup()
self.goto(0, 270)
self.pencolor("white")
self.hideturtle()
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.write(f"Score: {self.score} High Score: {self.high_score}", align=ALIGNMENT, font=FONT)
def reset(self):
if self.score > self.high_score:
self.high_score = self.score
with open("data.txt", mode="w") as data:
data.write(f"{self.high_score}")
self.score = 0
self.update_scoreboard()
# def game_over(self):
# self.goto(0, 0)
# self.write("GAME OVER", align=ALIGNMENT, font=FONT)
def increase_score(self):
self.score += 1
self.update_scoreboard()
| [
"[email protected]"
] | |
049e187c03f97db786c9e2c1f574457db6e103ed | f87f51ec4d9353bc3836e22ac4a944951f9c45c0 | /.history/HW02_20210630154801.py | 30b513b1fbb089d9ef3ed3c11ddca365506bce60 | [] | no_license | sanjayMamidipaka/cs1301 | deaffee3847519eb85030d1bd82ae11e734bc1b7 | 9ddb66596497382d807673eba96853a17884d67b | refs/heads/main | 2023-06-25T04:52:28.153535 | 2021-07-26T16:42:44 | 2021-07-26T16:42:44 | 389,703,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,560 | py | """
Georgia Institute of Technology - CS1301
HW02 - Conditionals and Loops
Collaboration Statement:
"""
#########################################
"""
Function Name: snackBar()
Parameters: snack (str), ingredient (str), yourMoney (float)
Returns: whether you can get the snack (bool)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def snackBar(snack, ingredient, yourMoney):
if snack == 'Hotdog':
if not ingredient == 'Gluten' and not ingredient == 'Meat' and yourMoney >= 5.99:
return True
else:
return False
if snack == 'Veggie Burger':
if not ingredient == 'Gluten' and yourMoney >= 5.99:
return True
else:
return False
if snack == 'Chili Bowl':
if not ingredient == 'Meat' and yourMoney >= 3.99:
return True
else:
return False
if snack == 'Chili Cheese Fries':
        if not ingredient == 'Meat' and not ingredient == 'Dairy' and yourMoney >= 4.99:
return True
else:
return False
"""
Function Name: waterGames()
Parameters: gameName (str), numPlayers (int), totalFriends (int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
def waterGames(gameName, numPlayers, totalFriends):
percentPlaying = numPlayers / totalFriends
if percentPlaying < 0.3:
print('Let’s choose something else.')
elif percentPlaying >= 0.3 and percentPlaying < 0.75:
print('We will {} for a little bit!'.format(gameName))
"""
Function Name: summerShopping()
Parameters: clothingItem (str), size (str)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: stopGame()
Parameters: initialPrice (float), finalPrice (float), percentGrowth (float)
Returns: numberOfDays (int)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
"""
Function Name: adventure()
Parameters: startDay (int), stopDay (int), hikeLimit(int)
Returns: None (NoneType)
"""
#########################################
########## WRITE FUNCTION HERE ##########
#########################################
| [
"[email protected]"
] | |
6981f306ac39b5d7c1aa2ca6983e0fbd6357d408 | eeb3d7c9ff4c882ac913ee8e00b2201bcfdd300f | /string/38.count-and-say.py | 612a90ec86afcaa5842e1c6f1a6a0e87aac53369 | [] | no_license | naseeihity/leetcode-daily | f89888328a1181e0592f09e90fea105d1568af99 | 4992a967ddccd05ab777dad69ce2f832dae26ae5 | refs/heads/master | 2023-04-23T06:08:35.473663 | 2021-05-10T15:19:33 | 2021-05-10T15:19:33 | 235,041,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | #
# @lc app=leetcode id=38 lang=python3
#
# [38] Count and Say
#
# @lc code=start
class Solution:
    def say(self, ans: str) -> str:
        # run-length encode ans: for each run of equal digits,
        # append "<run length><digit>"
        _ans = ""
        l, r = 0, len(ans)
        for i in range(1, r + 1):
            if i == r or ans[l] != ans[i]:
                count = i - l
                _ans += str(count)
                _ans += ans[l]
                l = i
        return _ans

    def countAndSay(self, n: int) -> str:
        # e.g. countAndSay(4) == "1211"
        ans = "1"
        for i in range(1, n):
            ans = self.say(ans)
        return ans
# @lc code=end
| [
"[email protected]"
] | |
2b6b23388fe62b64f777be4d9d1c785a09b4fd7c | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /benchmark/startQiskit_Class1958.py | 690424946f0df1baa082d94199b57207c409b8f0 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,922 | py | # qubit number=4
# total number=27
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
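    # XOR two equal-length bit strings; note the result is reversed before joining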
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
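    # dot product of two bit strings modulo 2 (parity of positions where both bits are 1)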
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.cx(input_qubit[0],input_qubit[3]) # number=24
prog.x(input_qubit[3]) # number=25
prog.cx(input_qubit[0],input_qubit[3]) # number=26
prog.rx(-1.9352210746113125,input_qubit[3]) # number=14
prog.cx(input_qubit[1],input_qubit[2]) # number=22
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.y(input_qubit[2]) # number=13
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.rx(-1.9069467407290044,input_qubit[2]) # number=20
prog.h(input_qubit[3]) # number=21
prog.y(input_qubit[2]) # number=10
prog.h(input_qubit[1]) # number=17
prog.cz(input_qubit[3],input_qubit[1]) # number=18
prog.h(input_qubit[1]) # number=19
prog.y(input_qubit[2]) # number=11
prog.cx(input_qubit[1],input_qubit[0]) # number=15
prog.cx(input_qubit[1],input_qubit[0]) # number=16
prog.z(input_qubit[3]) # number=23
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
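    # turn each statevector amplitude into a probability, keyed by its basis bitstring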
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class1958.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
| [
"[email protected]"
] | |
48306c72966c1c9ee9c538e95b126fab6e9107cb | 430bd6e15ce181fdbce4dd769cdfc971b43e9d5b | /doughnuts/doughnuts.py | c2437f1b3be0adb4fa3a15bd6216eb97023c4981 | [
"MIT"
] | permissive | kodosan/Doughnuts | 171e58415804af12cc54ed34b5b8510823dda70f | e246707390fb9c708241d35517a7d773858dbca7 | refs/heads/master | 2023-04-01T04:00:12.200820 | 2021-04-01T06:42:46 | 2021-04-01T06:42:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,169 | py | import builtins
from os import path
from sys import argv
from json import loads, JSONDecodeError
from helpmenu import register_helpmenu
from libs.app import Loop_init, run_loop
from libs.config import gset, gget, custom_set, color
from libs.runtime_config import CONFIG
from libs.myapp import banner
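# make ic() a no-op by default; the real icecream.ic is swapped in only in DEV mode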
builtins.ic = lambda *a, **kw: None
if (CONFIG["DEV"]):
try:
from icecream import ic
builtins.ic = ic
except ImportError:
pass
def main(print_banner: bool = True):
if (print_banner):
banner()
gset("root_path", path.split(path.realpath(__file__))[0])
with open(path.join(gget("root_path"), "auxiliary", "user_agents", "ua.txt"), "r") as f:
gset("user_agents", f.readlines())
register_helpmenu()
try:
with open(path.join(gget("root_path"), "variables.config"), "r") as f:
try:
for key, value in loads(f.read()).items():
custom_set(key=key, value=value)
print(
f"\n{color.green('Variable(s) loaded successfully from file variables.config')}\n")
except JSONDecodeError:
print(
f"\n{color.yellow('Variable(s) could not be read correctly')}\n")
except FileNotFoundError:
pass
except IOError:
print(f"\n{color.red('Permission denied to read variables.config')}\n")
run_loop(My_Loop_init(), leave_message="Bye! Doughnuts:)")
class My_Loop_init(Loop_init):
def set_platforms(self) -> dict:
return {"main": "main_plugins", "webshell": "webshell_plugins", "general": "general", "encode": "encode"}
def set_prompts(self) -> dict:
return {"main": "doughnuts > ", "webshell": "> "}
if __name__ == "__main__":
argc = len(argv)
if (argc > 1):
if (argv[1].lower() in ["generate", "gen"] and 1 < argc < 8):
gset("outside", True)
from main_plugins.generate import outside_generate as generate
generate(*argv[2:])
elif (argv[1] in ["connect", "c"]):
gset("preload_command", " ".join(argv[1:]))
main(False)
else:
main()
| [
"[email protected]"
] | |
cb98b4186be427666468060d4c7ba090787f0417 | b70eb5577099f88ae9f684f2c87647f98e26d42b | /hpc-historias-clinicas/historias/migrations/0012_auto_20150425_1937.py | 6e8d224cdc4208b57deb58837147f39f9868e9f2 | [] | no_license | btenaglia/hpc-historias-clinicas | be1a392a119a72055ba643fba9c9a09b740aef47 | 649d8660381381b1c591667760c122d73071d5ec | refs/heads/master | 2020-06-03T19:05:17.910077 | 2015-06-10T23:05:31 | 2015-06-10T23:05:31 | 32,827,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,145 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
dependencies = [
('historias', '0011_auto_20150425_1936'),
]
operations = [
migrations.AlterField(
model_name='historias',
name='dpto_cirugia',
field=models.ForeignKey(default=1, verbose_name='M\xe9dico Responsable', to='historias.DptosCirugiaGeneral'),
preserve_default=False,
),
migrations.AlterField(
model_name='historias',
name='fecha_ingreso',
field=models.DateField(default=datetime.datetime(2015, 4, 25, 19, 37, 9, 146737), help_text='Formato: dd/mm/yyyy', verbose_name='Fecha de Ingreso'),
preserve_default=True,
),
migrations.AlterField(
model_name='historias',
name='hora_ingreso',
field=models.TimeField(default=datetime.datetime(2015, 4, 25, 19, 37, 9, 146689), help_text='Formato: hh:mm', verbose_name='Hora de Ingreso'),
preserve_default=True,
),
]
| [
"[email protected]"
] | |
02f80d7db50e7e0eb74301cf11154423cea8e4de | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/9/v4y.py | 54854ac0aa3d6bf0ce044686b2f34bcdc430f6d8 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'v4Y':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
0f1e5f7680d8e48b6c5d8230307c0b7c1c017512 | 3f5bf0ed01ff34036b0476e82acdcdd646f66859 | /visualocean/__init__.py | 08c3257e01c6c8fe7351a67a26d464ab08f8a2c6 | [] | no_license | cpsarason/visualoceanpy | 9e64590f16659d61b6fefd9fc912a6c868175226 | b712e73849226dbdebf1c8da57bf00098ed1f4df | refs/heads/master | 2021-03-31T01:15:26.751240 | 2018-03-13T23:45:18 | 2018-03-13T23:45:18 | 125,122,884 | 0 | 0 | null | 2018-03-13T22:31:16 | 2018-03-13T22:31:15 | null | UTF-8 | Python | false | false | 219 | py | from __future__ import (absolute_import,
division,
print_function,
unicode_literals)
__author__ = 'Landung Setiawan'
__all__ = ['core', 'utils']
| [
"[email protected]"
] |