hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bcc4fcfb44a442a2523238a8484bf80417464006
| 5,084 |
py
|
Python
|
tests/integration_tests/security/test_seccomp.py
|
gregbdunn/firecracker
|
e7bc0a1f9b70deaa7bfd9eb641e0c7982fe63e68
|
[
"Apache-2.0"
] | 2 |
2018-12-20T05:40:43.000Z
|
2018-12-20T05:59:58.000Z
|
tests/integration_tests/security/test_seccomp.py
|
gregbdunn/firecracker
|
e7bc0a1f9b70deaa7bfd9eb641e0c7982fe63e68
|
[
"Apache-2.0"
] | null | null | null |
tests/integration_tests/security/test_seccomp.py
|
gregbdunn/firecracker
|
e7bc0a1f9b70deaa7bfd9eb641e0c7982fe63e68
|
[
"Apache-2.0"
] | 1 |
2018-11-27T08:50:51.000Z
|
2018-11-27T08:50:51.000Z
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests that the seccomp filters don't let blacklisted syscalls through."""
import os
from subprocess import run
import pytest
import host_tools.cargo_build as host # pylint:disable=import-error
def test_seccomp_ls(tmp_basic_jailer):
"""Assert that the seccomp filters deny a blacklisted syscall."""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
# Path to the `ls` binary, which attempts to execute `SYS_access`,
# blacklisted for Firecracker.
ls_command_path = '/bin/ls'
demo_jailer = tmp_basic_jailer
assert os.path.exists(demo_jailer)
# Compile the mini jailer.
outcome = run([demo_jailer, ls_command_path])
# The seccomp filters should send SIGSYS (31) to the binary. `ls` doesn't
# handle it, so it will exit with error.
assert outcome.returncode != 0
def test_advanced_seccomp_harmless(tmp_advanced_seccomp_binaries):
"""
Test `demo_harmless_firecracker`.
Test that the built demo jailer allows the built demo harmless firecracker.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
demo_advanced_jailer, demo_harmless_firecracker, _ =\
tmp_advanced_seccomp_binaries
assert os.path.exists(demo_advanced_jailer)
assert os.path.exists(demo_harmless_firecracker)
outcome = run([demo_advanced_jailer, demo_harmless_firecracker])
# The demo harmless firecracker should have terminated gracefully.
assert outcome.returncode == 0
def test_advanced_seccomp_malicious(tmp_advanced_seccomp_binaries):
"""
Test `demo_malicious_firecracker`.
Test that the built demo jailer denies the built demo malicious
firecracker.
"""
# pylint: disable=redefined-outer-name
# The fixture pattern causes a pylint false positive for that rule.
demo_advanced_jailer, _, demo_malicious_firecracker =\
tmp_advanced_seccomp_binaries
assert os.path.exists(demo_advanced_jailer)
assert os.path.exists(demo_malicious_firecracker)
outcome = run([demo_advanced_jailer, demo_malicious_firecracker])
# The demo malicious firecracker should have received `SIGSYS`.
assert outcome.returncode != 0
| 29.387283 | 79 | 0.696302 |
bcc54625026e4e77ba54fe67d05a342fde131c90
| 185 |
py
|
Python
|
cluster/density/test.py
|
michealowen/MachingLearning
|
9dcc908f2d3e468390e5abb7f051b449b0ecb455
|
[
"Apache-2.0"
] | 2 |
2019-09-11T07:02:25.000Z
|
2020-12-17T10:40:02.000Z
|
cluster/density/test.py
|
michealowen/MachingLearning
|
9dcc908f2d3e468390e5abb7f051b449b0ecb455
|
[
"Apache-2.0"
] | null | null | null |
cluster/density/test.py
|
michealowen/MachingLearning
|
9dcc908f2d3e468390e5abb7f051b449b0ecb455
|
[
"Apache-2.0"
] | null | null | null |
# NOTE(review): fragment of a larger script — class `a` is defined elsewhere
# and is not visible here; presumably a density-clustering demo object given
# the path `cluster/density/test.py`. TODO: confirm against the full file.
aa = a(1)
# Invokes the object's entry point; its effect cannot be determined from
# this fragment alone.
aa.go()
| 12.333333 | 26 | 0.475676 |
bcc5a1b4f97e4fd31b1d8727fc7f8a5dcff9e43e
| 566,013 |
py
|
Python
|
REM/Tool/IDA 7.3/python/ida_hexrays.py
|
dodieboy/Np_class
|
af9ec993eda3c1e2bf70257c8384696bb64a5e9d
|
[
"MIT"
] | null | null | null |
REM/Tool/IDA 7.3/python/ida_hexrays.py
|
dodieboy/Np_class
|
af9ec993eda3c1e2bf70257c8384696bb64a5e9d
|
[
"MIT"
] | null | null | null |
REM/Tool/IDA 7.3/python/ida_hexrays.py
|
dodieboy/Np_class
|
af9ec993eda3c1e2bf70257c8384696bb64a5e9d
|
[
"MIT"
] | 2 |
2021-03-30T00:46:58.000Z
|
2021-12-12T23:41:12.000Z
|
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 2.0.12
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
"""
IDA Plugin SDK API wrapper: hexrays
"""
from sys import version_info
if version_info >= (2,6,0):
_ida_hexrays = swig_import_helper()
del swig_import_helper
else:
import _ida_hexrays
del version_info
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
try:
_object = object
_newclass = 1
except AttributeError:
_newclass = 0
try:
import weakref
weakref_proxy = weakref.proxy
except:
weakref_proxy = lambda x: x
import ida_idaapi
import sys
_BC695 = sys.modules["__main__"].IDAPYTHON_COMPAT_695_API
if _BC695:
import ida_pro
import ida_xref
import ida_typeinf
import ida_idp
def _kludge_use_TPopupMenu(*args):
    """
    _kludge_use_TPopupMenu(m)
    """
    # SWIG-generated passthrough: forwards all arguments to the native
    # `_ida_hexrays` extension module. The leading underscore marks this
    # helper as internal to the binding.
    return _ida_hexrays._kludge_use_TPopupMenu(*args)
array_of_bitsets_swigregister = _ida_hexrays.array_of_bitsets_swigregister
array_of_bitsets_swigregister(array_of_bitsets)
mopvec_t_swigregister = _ida_hexrays.mopvec_t_swigregister
mopvec_t_swigregister(mopvec_t)
mcallargs_t_swigregister = _ida_hexrays.mcallargs_t_swigregister
mcallargs_t_swigregister(mcallargs_t)
block_chains_vec_t_swigregister = _ida_hexrays.block_chains_vec_t_swigregister
block_chains_vec_t_swigregister(block_chains_vec_t)
user_numforms_t_swigregister = _ida_hexrays.user_numforms_t_swigregister
user_numforms_t_swigregister(user_numforms_t)
lvar_mapping_t_swigregister = _ida_hexrays.lvar_mapping_t_swigregister
lvar_mapping_t_swigregister(lvar_mapping_t)
hexwarns_t_swigregister = _ida_hexrays.hexwarns_t_swigregister
hexwarns_t_swigregister(hexwarns_t)
ctree_items_t_swigregister = _ida_hexrays.ctree_items_t_swigregister
ctree_items_t_swigregister(ctree_items_t)
user_labels_t_swigregister = _ida_hexrays.user_labels_t_swigregister
user_labels_t_swigregister(user_labels_t)
user_cmts_t_swigregister = _ida_hexrays.user_cmts_t_swigregister
user_cmts_t_swigregister(user_cmts_t)
user_iflags_t_swigregister = _ida_hexrays.user_iflags_t_swigregister
user_iflags_t_swigregister(user_iflags_t)
user_unions_t_swigregister = _ida_hexrays.user_unions_t_swigregister
user_unions_t_swigregister(user_unions_t)
cinsnptrvec_t_swigregister = _ida_hexrays.cinsnptrvec_t_swigregister
cinsnptrvec_t_swigregister(cinsnptrvec_t)
eamap_t_swigregister = _ida_hexrays.eamap_t_swigregister
eamap_t_swigregister(eamap_t)
boundaries_t_swigregister = _ida_hexrays.boundaries_t_swigregister
boundaries_t_swigregister(boundaries_t)
def user_iflags_second(*args):
"""
user_iflags_second(p) -> int32 const &
Get reference to the current map value.
@param p (C++: user_iflags_iterator_t)
"""
return _ida_hexrays.user_iflags_second(*args)
cfuncptr_t_swigregister = _ida_hexrays.cfuncptr_t_swigregister
cfuncptr_t_swigregister(cfuncptr_t)
qvector_history_t_swigregister = _ida_hexrays.qvector_history_t_swigregister
qvector_history_t_swigregister(qvector_history_t)
history_t_swigregister = _ida_hexrays.history_t_swigregister
history_t_swigregister(history_t)
qlist_cinsn_t_iterator_swigregister = _ida_hexrays.qlist_cinsn_t_iterator_swigregister
qlist_cinsn_t_iterator_swigregister(qlist_cinsn_t_iterator)
qvector_lvar_t_swigregister = _ida_hexrays.qvector_lvar_t_swigregister
qvector_lvar_t_swigregister(qvector_lvar_t)
qlist_cinsn_t_swigregister = _ida_hexrays.qlist_cinsn_t_swigregister
qlist_cinsn_t_swigregister(qlist_cinsn_t)
qvector_carg_t_swigregister = _ida_hexrays.qvector_carg_t_swigregister
qvector_carg_t_swigregister(qvector_carg_t)
qvector_ccase_t_swigregister = _ida_hexrays.qvector_ccase_t_swigregister
qvector_ccase_t_swigregister(qvector_ccase_t)
lvar_saved_infos_t_swigregister = _ida_hexrays.lvar_saved_infos_t_swigregister
lvar_saved_infos_t_swigregister(lvar_saved_infos_t)
ui_stroff_ops_t_swigregister = _ida_hexrays.ui_stroff_ops_t_swigregister
ui_stroff_ops_t_swigregister(ui_stroff_ops_t)
def qswap(*args):
"""
qswap(a, b)
"""
return _ida_hexrays.qswap(*args)
fnum_array_swigregister = _ida_hexrays.fnum_array_swigregister
fnum_array_swigregister(fnum_array)
def debug_hexrays_ctree(*args):
"""
debug_hexrays_ctree(msg)
"""
return _ida_hexrays.debug_hexrays_ctree(*args)
def init_hexrays_plugin(*args):
    """
    init_hexrays_plugin(flags=0) -> bool
    Initialize your plugin for hex-rays decompiler. This function must be
    called before calling any other decompiler function. It initializes
    the pointer to the dispatcher.
    @param flags: reserved, must be 0 (C++: int)
    @return: true if the decompiler exists and the dispatcher pointer is
    ready to use.
    """
    # Thin SWIG wrapper; the actual initialization happens in the native
    # `_ida_hexrays` extension module.
    return _ida_hexrays.init_hexrays_plugin(*args)
def get_widget_vdui(*args):
"""
get_widget_vdui(f) -> vdui_t
Get the 'vdui_t' instance associated to the TWidget
@param f: pointer to window (C++: TWidget *)
@return: a vdui_t *, or NULL
"""
return _ida_hexrays.get_widget_vdui(*args)
def boundaries_find(*args):
"""
boundaries_find(map, key) -> boundaries_iterator_t
Find the specified key in boundaries_t.
@param map (C++: const boundaries_t *)
@param key (C++: const cinsn_t *&)
"""
return _ida_hexrays.boundaries_find(*args)
def boundaries_insert(*args):
"""
boundaries_insert(map, key, val) -> boundaries_iterator_t
Insert new ( 'cinsn_t' *, 'rangeset_t' ) pair into boundaries_t.
@param map (C++: boundaries_t *)
@param key (C++: const cinsn_t *&)
@param val (C++: const rangeset_t &)
"""
return _ida_hexrays.boundaries_insert(*args)
def term_hexrays_plugin(*args):
"""
term_hexrays_plugin()
Stop working with hex-rays decompiler.
"""
return _ida_hexrays.term_hexrays_plugin(*args)
Hexrays_Hooks_swigregister = _ida_hexrays.Hexrays_Hooks_swigregister
Hexrays_Hooks_swigregister(Hexrays_Hooks)
uval_ivl_t_swigregister = _ida_hexrays.uval_ivl_t_swigregister
uval_ivl_t_swigregister(uval_ivl_t)
uval_ivl_ivlset_t_swigregister = _ida_hexrays.uval_ivl_ivlset_t_swigregister
uval_ivl_ivlset_t_swigregister(uval_ivl_ivlset_t)
array_of_ivlsets_swigregister = _ida_hexrays.array_of_ivlsets_swigregister
array_of_ivlsets_swigregister(array_of_ivlsets)
MAX_SUPPORTED_STACK_SIZE = _ida_hexrays.MAX_SUPPORTED_STACK_SIZE
def hexrays_alloc(*args):
"""
hexrays_alloc(size) -> void *
"""
return _ida_hexrays.hexrays_alloc(*args)
def hexrays_free(*args):
"""
hexrays_free(ptr)
"""
return _ida_hexrays.hexrays_free(*args)
MAX_VLR_SIZE = _ida_hexrays.MAX_VLR_SIZE
CMP_NZ = _ida_hexrays.CMP_NZ
CMP_Z = _ida_hexrays.CMP_Z
CMP_AE = _ida_hexrays.CMP_AE
CMP_B = _ida_hexrays.CMP_B
CMP_A = _ida_hexrays.CMP_A
CMP_BE = _ida_hexrays.CMP_BE
CMP_GT = _ida_hexrays.CMP_GT
CMP_GE = _ida_hexrays.CMP_GE
CMP_LT = _ida_hexrays.CMP_LT
CMP_LE = _ida_hexrays.CMP_LE
valrng_t_swigregister = _ida_hexrays.valrng_t_swigregister
valrng_t_swigregister(valrng_t)
cvar = _ida_hexrays.cvar
MAX_VALUE = cvar.MAX_VALUE
MAX_SVALUE = cvar.MAX_SVALUE
MIN_SVALUE = cvar.MIN_SVALUE
NO_ACCESS = _ida_hexrays.NO_ACCESS
WRITE_ACCESS = _ida_hexrays.WRITE_ACCESS
READ_ACCESS = _ida_hexrays.READ_ACCESS
RW_ACCESS = _ida_hexrays.RW_ACCESS
def is_may_access(*args):
"""
is_may_access(maymust) -> bool
"""
return _ida_hexrays.is_may_access(*args)
MERR_OK = _ida_hexrays.MERR_OK
MERR_BLOCK = _ida_hexrays.MERR_BLOCK
MERR_INTERR = _ida_hexrays.MERR_INTERR
MERR_INSN = _ida_hexrays.MERR_INSN
MERR_MEM = _ida_hexrays.MERR_MEM
MERR_BADBLK = _ida_hexrays.MERR_BADBLK
MERR_BADSP = _ida_hexrays.MERR_BADSP
MERR_PROLOG = _ida_hexrays.MERR_PROLOG
MERR_SWITCH = _ida_hexrays.MERR_SWITCH
MERR_EXCEPTION = _ida_hexrays.MERR_EXCEPTION
MERR_HUGESTACK = _ida_hexrays.MERR_HUGESTACK
MERR_LVARS = _ida_hexrays.MERR_LVARS
MERR_BITNESS = _ida_hexrays.MERR_BITNESS
MERR_BADCALL = _ida_hexrays.MERR_BADCALL
MERR_BADFRAME = _ida_hexrays.MERR_BADFRAME
MERR_UNKTYPE = _ida_hexrays.MERR_UNKTYPE
MERR_BADIDB = _ida_hexrays.MERR_BADIDB
MERR_SIZEOF = _ida_hexrays.MERR_SIZEOF
MERR_REDO = _ida_hexrays.MERR_REDO
MERR_CANCELED = _ida_hexrays.MERR_CANCELED
MERR_RECDEPTH = _ida_hexrays.MERR_RECDEPTH
MERR_OVERLAP = _ida_hexrays.MERR_OVERLAP
MERR_PARTINIT = _ida_hexrays.MERR_PARTINIT
MERR_COMPLEX = _ida_hexrays.MERR_COMPLEX
MERR_LICENSE = _ida_hexrays.MERR_LICENSE
MERR_ONLY32 = _ida_hexrays.MERR_ONLY32
MERR_ONLY64 = _ida_hexrays.MERR_ONLY64
MERR_BUSY = _ida_hexrays.MERR_BUSY
MERR_FARPTR = _ida_hexrays.MERR_FARPTR
MERR_EXTERN = _ida_hexrays.MERR_EXTERN
MERR_FUNCSIZE = _ida_hexrays.MERR_FUNCSIZE
MERR_BADRANGES = _ida_hexrays.MERR_BADRANGES
MERR_STOP = _ida_hexrays.MERR_STOP
MERR_MAX_ERR = _ida_hexrays.MERR_MAX_ERR
MERR_LOOP = _ida_hexrays.MERR_LOOP
def get_merror_desc(*args):
    """
    get_merror_desc(code, mba) -> ea_t
    Get textual description of an error code
    @param code: Microcode error codes (C++: merror_t)
    @param mba: the microcode array (C++: mbl_array_t *)
    @return: the error address
    """
    # NOTE(review): the signature returns ea_t while the summary mentions a
    # textual description — the description is presumably delivered through a
    # SWIG-managed output buffer; confirm against hexrays.hpp.
    return _ida_hexrays.get_merror_desc(*args)
def reg2mreg(*args):
"""
reg2mreg(reg) -> mreg_t
Map a processor register to microregister.
@param reg: processor register number (C++: int)
@return: microregister register id or mr_none
"""
return _ida_hexrays.reg2mreg(*args)
def mreg2reg(*args):
"""
mreg2reg(reg, width) -> int
Map a microregister to processor register.
@param reg: microregister number (C++: mreg_t)
@param width: size of microregister in bytes (C++: int)
@return: processor register id or -1
"""
return _ida_hexrays.mreg2reg(*args)
optinsn_t_swigregister = _ida_hexrays.optinsn_t_swigregister
optinsn_t_swigregister(optinsn_t)
MUST_ACCESS = cvar.MUST_ACCESS
MAY_ACCESS = cvar.MAY_ACCESS
MAYMUST_ACCESS_MASK = cvar.MAYMUST_ACCESS_MASK
ONE_ACCESS_TYPE = cvar.ONE_ACCESS_TYPE
INCLUDE_SPOILED_REGS = cvar.INCLUDE_SPOILED_REGS
EXCLUDE_PASS_REGS = cvar.EXCLUDE_PASS_REGS
FULL_XDSU = cvar.FULL_XDSU
WITH_ASSERTS = cvar.WITH_ASSERTS
EXCLUDE_VOLATILE = cvar.EXCLUDE_VOLATILE
INCLUDE_UNUSED_SRC = cvar.INCLUDE_UNUSED_SRC
INCLUDE_DEAD_RETREGS = cvar.INCLUDE_DEAD_RETREGS
INCLUDE_RESTRICTED = cvar.INCLUDE_RESTRICTED
CALL_SPOILS_ONLY_ARGS = cvar.CALL_SPOILS_ONLY_ARGS
optblock_t_swigregister = _ida_hexrays.optblock_t_swigregister
optblock_t_swigregister(optblock_t)
m_nop = _ida_hexrays.m_nop
m_stx = _ida_hexrays.m_stx
m_ldx = _ida_hexrays.m_ldx
m_ldc = _ida_hexrays.m_ldc
m_mov = _ida_hexrays.m_mov
m_neg = _ida_hexrays.m_neg
m_lnot = _ida_hexrays.m_lnot
m_bnot = _ida_hexrays.m_bnot
m_xds = _ida_hexrays.m_xds
m_xdu = _ida_hexrays.m_xdu
m_low = _ida_hexrays.m_low
m_high = _ida_hexrays.m_high
m_add = _ida_hexrays.m_add
m_sub = _ida_hexrays.m_sub
m_mul = _ida_hexrays.m_mul
m_udiv = _ida_hexrays.m_udiv
m_sdiv = _ida_hexrays.m_sdiv
m_umod = _ida_hexrays.m_umod
m_smod = _ida_hexrays.m_smod
m_or = _ida_hexrays.m_or
m_and = _ida_hexrays.m_and
m_xor = _ida_hexrays.m_xor
m_shl = _ida_hexrays.m_shl
m_shr = _ida_hexrays.m_shr
m_sar = _ida_hexrays.m_sar
m_cfadd = _ida_hexrays.m_cfadd
m_ofadd = _ida_hexrays.m_ofadd
m_cfshl = _ida_hexrays.m_cfshl
m_cfshr = _ida_hexrays.m_cfshr
m_sets = _ida_hexrays.m_sets
m_seto = _ida_hexrays.m_seto
m_setp = _ida_hexrays.m_setp
m_setnz = _ida_hexrays.m_setnz
m_setz = _ida_hexrays.m_setz
m_setae = _ida_hexrays.m_setae
m_setb = _ida_hexrays.m_setb
m_seta = _ida_hexrays.m_seta
m_setbe = _ida_hexrays.m_setbe
m_setg = _ida_hexrays.m_setg
m_setge = _ida_hexrays.m_setge
m_setl = _ida_hexrays.m_setl
m_setle = _ida_hexrays.m_setle
m_jcnd = _ida_hexrays.m_jcnd
m_jnz = _ida_hexrays.m_jnz
m_jz = _ida_hexrays.m_jz
m_jae = _ida_hexrays.m_jae
m_jb = _ida_hexrays.m_jb
m_ja = _ida_hexrays.m_ja
m_jbe = _ida_hexrays.m_jbe
m_jg = _ida_hexrays.m_jg
m_jge = _ida_hexrays.m_jge
m_jl = _ida_hexrays.m_jl
m_jle = _ida_hexrays.m_jle
m_jtbl = _ida_hexrays.m_jtbl
m_ijmp = _ida_hexrays.m_ijmp
m_goto = _ida_hexrays.m_goto
m_call = _ida_hexrays.m_call
m_icall = _ida_hexrays.m_icall
m_ret = _ida_hexrays.m_ret
m_push = _ida_hexrays.m_push
m_pop = _ida_hexrays.m_pop
m_und = _ida_hexrays.m_und
m_ext = _ida_hexrays.m_ext
m_f2i = _ida_hexrays.m_f2i
m_f2u = _ida_hexrays.m_f2u
m_i2f = _ida_hexrays.m_i2f
m_u2f = _ida_hexrays.m_u2f
m_f2f = _ida_hexrays.m_f2f
m_fneg = _ida_hexrays.m_fneg
m_fadd = _ida_hexrays.m_fadd
m_fsub = _ida_hexrays.m_fsub
m_fmul = _ida_hexrays.m_fmul
m_fdiv = _ida_hexrays.m_fdiv
def must_mcode_close_block(*args):
    """
    must_mcode_close_block(mcode, including_calls) -> bool
    Must an instruction with the given opcode be the last one in a block?
    Such opcodes are called closing opcodes.
    @param mcode: instruction opcode (C++: mcode_t)
    @param including_calls: should m_call/m_icall be considered as the
                            closing opcodes? If this function returns
                            true, the opcode cannot appear in the middle
                            of a block. Calls are a special case because
                            before MMAT_CALLS they are closing opcodes.
                            After MMAT_CALLS they are not considered as
                            closing opcodes. (C++: bool)
    """
    # Thin SWIG wrapper around the native implementation.
    return _ida_hexrays.must_mcode_close_block(*args)
def is_mcode_propagatable(*args):
"""
is_mcode_propagatable(mcode) -> bool
May opcode be propagated? Such opcodes can be used in sub-instructions
(nested instructions) There is a handful of non-propagatable opcodes,
like jumps, ret, nop, etc All other regular opcodes are propagatable
and may appear in a nested instruction.
@param mcode (C++: mcode_t)
"""
return _ida_hexrays.is_mcode_propagatable(*args)
def is_mcode_addsub(*args):
"""
is_mcode_addsub(mcode) -> bool
"""
return _ida_hexrays.is_mcode_addsub(*args)
def is_mcode_xdsu(*args):
"""
is_mcode_xdsu(mcode) -> bool
"""
return _ida_hexrays.is_mcode_xdsu(*args)
def is_mcode_set(*args):
"""
is_mcode_set(mcode) -> bool
"""
return _ida_hexrays.is_mcode_set(*args)
def is_mcode_set1(*args):
"""
is_mcode_set1(mcode) -> bool
"""
return _ida_hexrays.is_mcode_set1(*args)
def is_mcode_j1(*args):
"""
is_mcode_j1(mcode) -> bool
"""
return _ida_hexrays.is_mcode_j1(*args)
def is_mcode_jcond(*args):
"""
is_mcode_jcond(mcode) -> bool
"""
return _ida_hexrays.is_mcode_jcond(*args)
def is_mcode_convertible_to_jmp(*args):
"""
is_mcode_convertible_to_jmp(mcode) -> bool
"""
return _ida_hexrays.is_mcode_convertible_to_jmp(*args)
def is_mcode_convertible_to_set(*args):
"""
is_mcode_convertible_to_set(mcode) -> bool
"""
return _ida_hexrays.is_mcode_convertible_to_set(*args)
def is_mcode_call(*args):
"""
is_mcode_call(mcode) -> bool
"""
return _ida_hexrays.is_mcode_call(*args)
def is_mcode_fpu(*args):
"""
is_mcode_fpu(mcode) -> bool
"""
return _ida_hexrays.is_mcode_fpu(*args)
def is_mcode_commutative(*args):
"""
is_mcode_commutative(mcode) -> bool
"""
return _ida_hexrays.is_mcode_commutative(*args)
def is_mcode_shift(*args):
"""
is_mcode_shift(mcode) -> bool
"""
return _ida_hexrays.is_mcode_shift(*args)
def is_mcode_divmod(*args):
"""
is_mcode_divmod(op) -> bool
"""
return _ida_hexrays.is_mcode_divmod(*args)
def set2jcnd(*args):
"""
set2jcnd(code) -> mcode_t
"""
return _ida_hexrays.set2jcnd(*args)
def jcnd2set(*args):
"""
jcnd2set(code) -> mcode_t
"""
return _ida_hexrays.jcnd2set(*args)
def negate_mcode_relation(*args):
"""
negate_mcode_relation(code) -> mcode_t
"""
return _ida_hexrays.negate_mcode_relation(*args)
def swap_mcode_relation(*args):
"""
swap_mcode_relation(code) -> mcode_t
"""
return _ida_hexrays.swap_mcode_relation(*args)
def get_signed_mcode(*args):
"""
get_signed_mcode(code) -> mcode_t
"""
return _ida_hexrays.get_signed_mcode(*args)
def get_unsigned_mcode(*args):
"""
get_unsigned_mcode(code) -> mcode_t
"""
return _ida_hexrays.get_unsigned_mcode(*args)
def is_signed_mcode(*args):
"""
is_signed_mcode(code) -> bool
"""
return _ida_hexrays.is_signed_mcode(*args)
def is_unsigned_mcode(*args):
"""
is_unsigned_mcode(code) -> bool
"""
return _ida_hexrays.is_unsigned_mcode(*args)
def mcode_modifies_d(*args):
"""
mcode_modifies_d(mcode) -> bool
"""
return _ida_hexrays.mcode_modifies_d(*args)
operand_locator_t_swigregister = _ida_hexrays.operand_locator_t_swigregister
operand_locator_t_swigregister(operand_locator_t)
mr_none = cvar.mr_none
mr_cf = cvar.mr_cf
mr_zf = cvar.mr_zf
mr_sf = cvar.mr_sf
mr_of = cvar.mr_of
mr_pf = cvar.mr_pf
cc_count = cvar.cc_count
mr_cc = cvar.mr_cc
mr_first = cvar.mr_first
number_format_t_swigregister = _ida_hexrays.number_format_t_swigregister
number_format_t_swigregister(number_format_t)
NF_FIXED = _ida_hexrays.NF_FIXED
"""
number format has been defined by the user
"""
NF_NEGDONE = _ida_hexrays.NF_NEGDONE
"""
temporary internal bit: negation has been performed
"""
NF_BINVDONE = _ida_hexrays.NF_BINVDONE
"""
temporary internal bit: inverting bits is done
"""
NF_NEGATE = _ida_hexrays.NF_NEGATE
"""
The user asked to negate the constant.
"""
NF_BITNOT = _ida_hexrays.NF_BITNOT
"""
The user asked to invert bits of the constant.
"""
NF_STROFF = _ida_hexrays.NF_STROFF
"""
internal bit: used as stroff, valid iff 'is_stroff()'
"""
vd_printer_t_swigregister = _ida_hexrays.vd_printer_t_swigregister
vd_printer_t_swigregister(vd_printer_t)
vc_printer_t_swigregister = _ida_hexrays.vc_printer_t_swigregister
vc_printer_t_swigregister(vc_printer_t)
qstring_printer_t_swigregister = _ida_hexrays.qstring_printer_t_swigregister
qstring_printer_t_swigregister(qstring_printer_t)
def dstr(*args):
"""
dstr(tif) -> char const *
Print the specified type info. This function can be used from a
debugger by typing "tif->dstr()"
@param tif (C++: const tinfo_t *)
"""
return _ida_hexrays.dstr(*args)
def is_type_correct(*args):
"""
is_type_correct(ptr) -> bool
Verify a type string.
@param ptr (C++: const type_t *)
@return: true if type string is correct
"""
return _ida_hexrays.is_type_correct(*args)
def is_small_udt(*args):
"""
is_small_udt(tif) -> bool
Is a small structure or union?
@param tif (C++: const tinfo_t &)
@return: true if the type is a small UDT (user defined type). Small
UDTs fit into a register (or pair or registers) as a rule.
"""
return _ida_hexrays.is_small_udt(*args)
def is_nonbool_type(*args):
"""
is_nonbool_type(type) -> bool
Is definitely a non-boolean type?
@param type (C++: const tinfo_t &)
@return: true if the type is a non-boolean type (non bool and well
defined)
"""
return _ida_hexrays.is_nonbool_type(*args)
def is_bool_type(*args):
"""
is_bool_type(type) -> bool
Is a boolean type?
@param type (C++: const tinfo_t &)
@return: true if the type is a boolean type
"""
return _ida_hexrays.is_bool_type(*args)
def is_ptr_or_array(*args):
"""
is_ptr_or_array(t) -> bool
Is a pointer or array type?
@param t (C++: type_t)
"""
return _ida_hexrays.is_ptr_or_array(*args)
def is_paf(*args):
"""
is_paf(t) -> bool
Is a pointer, array, or function type?
@param t (C++: type_t)
"""
return _ida_hexrays.is_paf(*args)
def is_inplace_def(*args):
"""
is_inplace_def(type) -> bool
Is struct/union/enum definition (not declaration)?
@param type (C++: const tinfo_t &)
"""
return _ida_hexrays.is_inplace_def(*args)
def partial_type_num(*args):
"""
partial_type_num(type) -> int
Calculate number of partial subtypes.
@param type (C++: const tinfo_t &)
@return: number of partial subtypes. The bigger is this number, the
uglier is the type.
"""
return _ida_hexrays.partial_type_num(*args)
def get_float_type(*args):
"""
get_float_type(width) -> tinfo_t
Get a type of a floating point value with the specified width
@param width: width of the desired type (C++: int)
@return: type info object
"""
return _ida_hexrays.get_float_type(*args)
def get_int_type_by_width_and_sign(*args):
"""
get_int_type_by_width_and_sign(srcwidth, sign) -> tinfo_t
Create a type info by width and sign. Returns a simple type (examples:
int, short) with the given width and sign.
@param srcwidth: size of the type in bytes (C++: int)
@param sign: sign of the type (C++: type_sign_t)
"""
return _ida_hexrays.get_int_type_by_width_and_sign(*args)
def get_unk_type(*args):
"""
get_unk_type(size) -> tinfo_t
Create a partial type info by width. Returns a partially defined type
(examples: _DWORD, _BYTE) with the given width.
@param size: size of the type in bytes (C++: int)
"""
return _ida_hexrays.get_unk_type(*args)
def dummy_ptrtype(*args):
"""
dummy_ptrtype(ptrsize, isfp) -> tinfo_t
Generate a dummy pointer type
@param ptrsize: size of pointed object (C++: int)
@param isfp: is floating point object? (C++: bool)
"""
return _ida_hexrays.dummy_ptrtype(*args)
def get_member_type(*args):
"""
get_member_type(mptr, type) -> bool
Get type of a structure field. This function performs validity checks
of the field type. Wrong types are rejected.
@param mptr: structure field (C++: const member_t *)
@param type: pointer to the variable where the type is returned. This
parameter can be NULL. (C++: tinfo_t *)
@return: false if failed
"""
return _ida_hexrays.get_member_type(*args)
def make_pointer(*args):
"""
make_pointer(type) -> tinfo_t
Create a pointer type. This function performs the following
conversion: "type" -> "type*"
@param type: object type. (C++: const tinfo_t &)
@return: "type*". for example, if 'char' is passed as the argument,
"""
return _ida_hexrays.make_pointer(*args)
def create_typedef(*args):
"""
create_typedef(name) -> tinfo_t
create_typedef(n) -> tinfo_t
Create a reference to a named type.
@param name: type name (C++: const char *)
@return: type which refers to the specified name. For example, if name
is "DWORD", the type info which refers to "DWORD" is created.
"""
return _ida_hexrays.create_typedef(*args)
GUESSED_NONE = _ida_hexrays.GUESSED_NONE
GUESSED_WEAK = _ida_hexrays.GUESSED_WEAK
GUESSED_FUNC = _ida_hexrays.GUESSED_FUNC
GUESSED_DATA = _ida_hexrays.GUESSED_DATA
TS_NOELL = _ida_hexrays.TS_NOELL
TS_SHRINK = _ida_hexrays.TS_SHRINK
TS_DONTREF = _ida_hexrays.TS_DONTREF
TS_MASK = _ida_hexrays.TS_MASK
def get_type(*args):
"""
get_type(id, tif, guess) -> bool
Get a global type. Global types are types of addressable objects and
struct/union/enum types
@param id: address or id of the object (C++: uval_t)
@param tif: buffer for the answer (C++: tinfo_t *)
@param guess: what kind of types to consider (C++: type_source_t)
@return: success
"""
return _ida_hexrays.get_type(*args)
def set_type(*args):
"""
set_type(id, tif, source, force=False) -> bool
Set a global type.
@param id: address or id of the object (C++: uval_t)
@param tif: new type info (C++: const tinfo_t &)
@param source: where the type comes from (C++: type_source_t)
@param force: true means to set the type as is, false means to merge
the new type with the possibly existing old type info.
(C++: bool)
@return: success
"""
return _ida_hexrays.set_type(*args)
vdloc_t_swigregister = _ida_hexrays.vdloc_t_swigregister
vdloc_t_swigregister(vdloc_t)
def print_vdloc(*args):
"""
print_vdloc(loc, nbytes)
Print vdloc. Since vdloc does not always carry the size info, we pass
it as NBYTES..
@param loc (C++: const vdloc_t &)
@param nbytes (C++: int)
"""
return _ida_hexrays.print_vdloc(*args)
def arglocs_overlap(*args):
"""
arglocs_overlap(loc1, w1, loc2, w2) -> bool
Do two arglocs overlap?
@param loc1 (C++: const vdloc_t &)
@param w1 (C++: size_t)
@param loc2 (C++: const vdloc_t &)
@param w2 (C++: size_t)
"""
return _ida_hexrays.arglocs_overlap(*args)
lvar_locator_t_swigregister = _ida_hexrays.lvar_locator_t_swigregister
lvar_locator_t_swigregister(lvar_locator_t)
lvar_t_swigregister = _ida_hexrays.lvar_t_swigregister
lvar_t_swigregister(lvar_t)
SVW_INT = _ida_hexrays.SVW_INT
SVW_FLOAT = _ida_hexrays.SVW_FLOAT
SVW_SOFT = _ida_hexrays.SVW_SOFT
lvars_t_swigregister = _ida_hexrays.lvars_t_swigregister
lvars_t_swigregister(lvars_t)
lvar_saved_info_t_swigregister = _ida_hexrays.lvar_saved_info_t_swigregister
lvar_saved_info_t_swigregister(lvar_saved_info_t)
LVINF_KEEP = _ida_hexrays.LVINF_KEEP
"""
preserve saved user settings regardless of vars for example, if a var
loses all its user-defined attributes or even gets destroyed, keep its
'lvar_saved_info_t' . this is used for ephemeral variables that get
destroyed by macro recognition.
"""
LVINF_FORCE = _ida_hexrays.LVINF_FORCE
"""
force allocation of a new variable. forces the decompiler to create a
new variable at ll.defea
"""
LVINF_NOPTR = _ida_hexrays.LVINF_NOPTR
"""
variable type should not be a pointer
"""
lvar_uservec_t_swigregister = _ida_hexrays.lvar_uservec_t_swigregister
lvar_uservec_t_swigregister(lvar_uservec_t)
ULV_PRECISE_DEFEA = _ida_hexrays.ULV_PRECISE_DEFEA
"""
Use precise defea's for lvar locations.
"""
def restore_user_lvar_settings(*args):
"""
restore_user_lvar_settings(lvinf, func_ea) -> bool
Restore user defined local variable settings in the database.
@param lvinf: ptr to output buffer (C++: lvar_uservec_t *)
@param func_ea: entry address of the function (C++: ea_t)
@return: success
"""
return _ida_hexrays.restore_user_lvar_settings(*args)
def save_user_lvar_settings(*args):
"""
save_user_lvar_settings(func_ea, lvinf)
Save user defined local variable settings into the database.
@param func_ea: entry address of the function (C++: ea_t)
@param lvinf: user-specified info about local variables (C++: const
lvar_uservec_t &)
"""
return _ida_hexrays.save_user_lvar_settings(*args)
user_lvar_modifier_t_swigregister = _ida_hexrays.user_lvar_modifier_t_swigregister
user_lvar_modifier_t_swigregister(user_lvar_modifier_t)
def modify_user_lvars(*args):
"""
modify_user_lvars(entry_ea, mlv) -> bool
Modify saved local variable settings.
@param entry_ea: function start address (C++: ea_t)
@param mlv: local variable modifier (C++: user_lvar_modifier_t &)
@return: true if modified variables
"""
return _ida_hexrays.modify_user_lvars(*args)
udcall_t_swigregister = _ida_hexrays.udcall_t_swigregister
udcall_t_swigregister(udcall_t)
def restore_user_defined_calls(*args):
"""
restore_user_defined_calls(udcalls, func_ea) -> bool
Restore user defined function calls from the database.
@param udcalls: ptr to output buffer (C++: udcall_map_t *)
@param func_ea: entry address of the function (C++: ea_t)
@return: success
"""
return _ida_hexrays.restore_user_defined_calls(*args)
def save_user_defined_calls(*args):
"""
save_user_defined_calls(func_ea, udcalls)
Save user defined local function calls into the database.
@param func_ea: entry address of the function (C++: ea_t)
@param udcalls: user-specified info about user defined function calls
(C++: const udcall_map_t &)
"""
return _ida_hexrays.save_user_defined_calls(*args)
def parse_user_call(*args):
"""
parse_user_call(udc, decl, silent) -> bool
Convert function type declaration into internal structure
@param udc: - pointer to output structure (C++: udcall_t *)
@param decl: - function type declaration (C++: const char *)
@param silent: - if TRUE: do not show warning in case of incorrect
type (C++: bool)
@return: success
"""
return _ida_hexrays.parse_user_call(*args)
def convert_to_user_call(*args):
    """
    convert_to_user_call(udc, cdg) -> merror_t
    try to generate user-defined call for an instruction
    @param udc (C++: const udcall_t &)
    @param cdg (C++: codegen_t &)
    @return: Microcode error codes code: MERR_OK - user-defined call
             generated else - error (MERR_INSN == unacceptable udc.tif)
    """
    # Thin SWIG wrapper around the native implementation.
    return _ida_hexrays.convert_to_user_call(*args)
microcode_filter_t_swigregister = _ida_hexrays.microcode_filter_t_swigregister
microcode_filter_t_swigregister(microcode_filter_t)
def install_microcode_filter(*args):
    """
    install_microcode_filter(filter, install=True)

    Register/unregister a non-standard microcode generator.

    @param filter: microcode generator object (C++: microcode_filter_t *)
    @param install: TRUE - register the object, FALSE - unregister
        (C++: bool)
    """
    return _ida_hexrays.install_microcode_filter(*args)
udc_filter_t_swigregister = _ida_hexrays.udc_filter_t_swigregister
udc_filter_t_swigregister(udc_filter_t)
bitset_t_swigregister = _ida_hexrays.bitset_t_swigregister
bitset_t_swigregister(bitset_t)
bitset_width = cvar.bitset_width
bitset_align = cvar.bitset_align
bitset_shift = cvar.bitset_shift
ivl_t_swigregister = _ida_hexrays.ivl_t_swigregister
ivl_t_swigregister(ivl_t)
ivl_with_name_t_swigregister = _ida_hexrays.ivl_with_name_t_swigregister
ivl_with_name_t_swigregister(ivl_with_name_t)
ivlset_t_swigregister = _ida_hexrays.ivlset_t_swigregister
ivlset_t_swigregister(ivlset_t)
def get_mreg_name(*args):
    """
    get_mreg_name(bit, width, ud=None) -> int
    """
    # Thin SWIG wrapper; the semantics are defined by the native
    # _ida_hexrays extension module.
    return _ida_hexrays.get_mreg_name(*args)
rlist_t_swigregister = _ida_hexrays.rlist_t_swigregister
rlist_t_swigregister(rlist_t)
mlist_t_swigregister = _ida_hexrays.mlist_t_swigregister
mlist_t_swigregister(mlist_t)
simple_graph_t_swigregister = _ida_hexrays.simple_graph_t_swigregister
simple_graph_t_swigregister(simple_graph_t)
op_parent_info_t_swigregister = _ida_hexrays.op_parent_info_t_swigregister
op_parent_info_t_swigregister(op_parent_info_t)
minsn_visitor_t_swigregister = _ida_hexrays.minsn_visitor_t_swigregister
minsn_visitor_t_swigregister(minsn_visitor_t)
mop_visitor_t_swigregister = _ida_hexrays.mop_visitor_t_swigregister
mop_visitor_t_swigregister(mop_visitor_t)
scif_visitor_t_swigregister = _ida_hexrays.scif_visitor_t_swigregister
scif_visitor_t_swigregister(scif_visitor_t)
mlist_mop_visitor_t_swigregister = _ida_hexrays.mlist_mop_visitor_t_swigregister
mlist_mop_visitor_t_swigregister(mlist_mop_visitor_t)
lvar_ref_t_swigregister = _ida_hexrays.lvar_ref_t_swigregister
lvar_ref_t_swigregister(lvar_ref_t)
mop_z = cvar.mop_z
mop_r = cvar.mop_r
mop_n = cvar.mop_n
mop_str = cvar.mop_str
mop_d = cvar.mop_d
mop_S = cvar.mop_S
mop_v = cvar.mop_v
mop_b = cvar.mop_b
mop_f = cvar.mop_f
mop_l = cvar.mop_l
mop_a = cvar.mop_a
mop_h = cvar.mop_h
mop_c = cvar.mop_c
mop_fn = cvar.mop_fn
mop_p = cvar.mop_p
mop_sc = cvar.mop_sc
NOSIZE = cvar.NOSIZE
stkvar_ref_t_swigregister = _ida_hexrays.stkvar_ref_t_swigregister
stkvar_ref_t_swigregister(stkvar_ref_t)
scif_t_swigregister = _ida_hexrays.scif_t_swigregister
scif_t_swigregister(scif_t)
mnumber_t_swigregister = _ida_hexrays.mnumber_t_swigregister
mnumber_t_swigregister(mnumber_t)
fnumber_t_swigregister = _ida_hexrays.fnumber_t_swigregister
fnumber_t_swigregister(fnumber_t)
SHINS_NUMADDR = _ida_hexrays.SHINS_NUMADDR
"""
display definition addresses for numbers
"""
SHINS_VALNUM = _ida_hexrays.SHINS_VALNUM
"""
display value numbers
"""
SHINS_SHORT = _ida_hexrays.SHINS_SHORT
"""
do not display use-def chains and other attrs
"""
SHINS_LDXEA = _ida_hexrays.SHINS_LDXEA
"""
display address of ldx expressions (not used)
"""
NO_SIDEFF = _ida_hexrays.NO_SIDEFF
WITH_SIDEFF = _ida_hexrays.WITH_SIDEFF
ONLY_SIDEFF = _ida_hexrays.ONLY_SIDEFF
ANY_REGSIZE = _ida_hexrays.ANY_REGSIZE
mop_t_swigregister = _ida_hexrays.mop_t_swigregister
mop_t_swigregister(mop_t)
MAX_OPSIZE = cvar.MAX_OPSIZE
DOUBLE_OPSIZE = cvar.DOUBLE_OPSIZE
OPROP_IMPDONE = _ida_hexrays.OPROP_IMPDONE
"""
imported operand (a pointer) has been dereferenced
"""
OPROP_UDT = _ida_hexrays.OPROP_UDT
"""
a struct or union
"""
OPROP_FLOAT = _ida_hexrays.OPROP_FLOAT
"""
possibly floating value
"""
OPROP_CCFLAGS = _ida_hexrays.OPROP_CCFLAGS
"""
condition codes register value
"""
OPROP_UDEFVAL = _ida_hexrays.OPROP_UDEFVAL
"""
uses undefined value
"""
def lexcompare(*args):
    """
    lexcompare(a, b) -> int
    """
    # Thin SWIG wrapper around the native comparison implementation.
    return _ida_hexrays.lexcompare(*args)
mop_pair_t_swigregister = _ida_hexrays.mop_pair_t_swigregister
mop_pair_t_swigregister(mop_pair_t)
mop_addr_t_swigregister = _ida_hexrays.mop_addr_t_swigregister
mop_addr_t_swigregister(mop_addr_t)
mcallarg_t_swigregister = _ida_hexrays.mcallarg_t_swigregister
mcallarg_t_swigregister(mcallarg_t)
ROLE_UNK = _ida_hexrays.ROLE_UNK
ROLE_EMPTY = _ida_hexrays.ROLE_EMPTY
ROLE_MEMSET = _ida_hexrays.ROLE_MEMSET
ROLE_MEMSET32 = _ida_hexrays.ROLE_MEMSET32
ROLE_MEMSET64 = _ida_hexrays.ROLE_MEMSET64
ROLE_MEMCPY = _ida_hexrays.ROLE_MEMCPY
ROLE_STRCPY = _ida_hexrays.ROLE_STRCPY
ROLE_STRLEN = _ida_hexrays.ROLE_STRLEN
ROLE_STRCAT = _ida_hexrays.ROLE_STRCAT
ROLE_TAIL = _ida_hexrays.ROLE_TAIL
ROLE_BUG = _ida_hexrays.ROLE_BUG
ROLE_ALLOCA = _ida_hexrays.ROLE_ALLOCA
ROLE_BSWAP = _ida_hexrays.ROLE_BSWAP
ROLE_PRESENT = _ida_hexrays.ROLE_PRESENT
ROLE_CONTAINING_RECORD = _ida_hexrays.ROLE_CONTAINING_RECORD
ROLE_FASTFAIL = _ida_hexrays.ROLE_FASTFAIL
ROLE_READFLAGS = _ida_hexrays.ROLE_READFLAGS
ROLE_IS_MUL_OK = _ida_hexrays.ROLE_IS_MUL_OK
ROLE_SATURATED_MUL = _ida_hexrays.ROLE_SATURATED_MUL
ROLE_BITTEST = _ida_hexrays.ROLE_BITTEST
ROLE_BITTESTANDSET = _ida_hexrays.ROLE_BITTESTANDSET
ROLE_BITTESTANDRESET = _ida_hexrays.ROLE_BITTESTANDRESET
ROLE_BITTESTANDCOMPLEMENT = _ida_hexrays.ROLE_BITTESTANDCOMPLEMENT
ROLE_VA_ARG = _ida_hexrays.ROLE_VA_ARG
ROLE_VA_COPY = _ida_hexrays.ROLE_VA_COPY
ROLE_VA_START = _ida_hexrays.ROLE_VA_START
ROLE_VA_END = _ida_hexrays.ROLE_VA_END
ROLE_ROL = _ida_hexrays.ROLE_ROL
ROLE_ROR = _ida_hexrays.ROLE_ROR
ROLE_CFSUB3 = _ida_hexrays.ROLE_CFSUB3
ROLE_OFSUB3 = _ida_hexrays.ROLE_OFSUB3
ROLE_ABS = _ida_hexrays.ROLE_ABS
FUNC_NAME_MEMCPY = _ida_hexrays.FUNC_NAME_MEMCPY
FUNC_NAME_MEMSET = _ida_hexrays.FUNC_NAME_MEMSET
FUNC_NAME_MEMSET32 = _ida_hexrays.FUNC_NAME_MEMSET32
FUNC_NAME_MEMSET64 = _ida_hexrays.FUNC_NAME_MEMSET64
FUNC_NAME_STRCPY = _ida_hexrays.FUNC_NAME_STRCPY
FUNC_NAME_STRLEN = _ida_hexrays.FUNC_NAME_STRLEN
FUNC_NAME_STRCAT = _ida_hexrays.FUNC_NAME_STRCAT
FUNC_NAME_TAIL = _ida_hexrays.FUNC_NAME_TAIL
FUNC_NAME_VA_ARG = _ida_hexrays.FUNC_NAME_VA_ARG
FUNC_NAME_EMPTY = _ida_hexrays.FUNC_NAME_EMPTY
FUNC_NAME_PRESENT = _ida_hexrays.FUNC_NAME_PRESENT
FUNC_NAME_CONTAINING_RECORD = _ida_hexrays.FUNC_NAME_CONTAINING_RECORD
mcallinfo_t_swigregister = _ida_hexrays.mcallinfo_t_swigregister
mcallinfo_t_swigregister(mcallinfo_t)
FCI_PROP = _ida_hexrays.FCI_PROP
"""
call has been propagated
"""
FCI_DEAD = _ida_hexrays.FCI_DEAD
"""
some return registers were determined dead
"""
FCI_FINAL = _ida_hexrays.FCI_FINAL
"""
call type is final, should not be changed
"""
FCI_NORET = _ida_hexrays.FCI_NORET
"""
call does not return
"""
FCI_PURE = _ida_hexrays.FCI_PURE
"""
pure function
"""
FCI_NOSIDE = _ida_hexrays.FCI_NOSIDE
"""
call does not have side effects
"""
FCI_SPLOK = _ida_hexrays.FCI_SPLOK
"""
spoiled/visible_memory lists have been optimized. for some functions
we can reduce them as soon as information about the arguments becomes
available. in order not to try optimize them again we use this bit.
"""
FCI_HASCALL = _ida_hexrays.FCI_HASCALL
"""
A function is an synthetic helper combined from several instructions
and at least one of them was a call to a real functions
"""
FCI_HASFMT = _ida_hexrays.FCI_HASFMT
"""
printf- or scanf-style format string
A variadic function with recognized
"""
mcases_t_swigregister = _ida_hexrays.mcases_t_swigregister
mcases_t_swigregister(mcases_t)
voff_t_swigregister = _ida_hexrays.voff_t_swigregister
voff_t_swigregister(voff_t)
vivl_t_swigregister = _ida_hexrays.vivl_t_swigregister
vivl_t_swigregister(vivl_t)
chain_t_swigregister = _ida_hexrays.chain_t_swigregister
chain_t_swigregister(chain_t)
CHF_INITED = _ida_hexrays.CHF_INITED
"""
is chain initialized? (valid only after lvar allocation)
"""
CHF_REPLACED = _ida_hexrays.CHF_REPLACED
"""
chain operands have been replaced?
"""
CHF_OVER = _ida_hexrays.CHF_OVER
"""
overlapped chain
"""
CHF_FAKE = _ida_hexrays.CHF_FAKE
"""
fake chain created by widen_chains()
"""
CHF_PASSTHRU = _ida_hexrays.CHF_PASSTHRU
"""
pass-thru chain, must use the input variable to the block
"""
CHF_TERM = _ida_hexrays.CHF_TERM
"""
terminating chain; the variable does not survive across the block
"""
SIZEOF_BLOCK_CHAINS = _ida_hexrays.SIZEOF_BLOCK_CHAINS
block_chains_t_swigregister = _ida_hexrays.block_chains_t_swigregister
block_chains_t_swigregister(block_chains_t)
chain_visitor_t_swigregister = _ida_hexrays.chain_visitor_t_swigregister
chain_visitor_t_swigregister(chain_visitor_t)
graph_chains_t_swigregister = _ida_hexrays.graph_chains_t_swigregister
graph_chains_t_swigregister(graph_chains_t)
GCA_EMPTY = _ida_hexrays.GCA_EMPTY
"""
include empty chains
"""
GCA_SPEC = _ida_hexrays.GCA_SPEC
"""
include chains for special registers
"""
GCA_ALLOC = _ida_hexrays.GCA_ALLOC
"""
enumerate only allocated chains
"""
GCA_NALLOC = _ida_hexrays.GCA_NALLOC
"""
enumerate only non-allocated chains
"""
GCA_OFIRST = _ida_hexrays.GCA_OFIRST
"""
consider only chains of the first block
"""
GCA_OLAST = _ida_hexrays.GCA_OLAST
"""
consider only chains of the last block
"""
minsn_t_swigregister = _ida_hexrays.minsn_t_swigregister
minsn_t_swigregister(minsn_t)
IPROP_OPTIONAL = _ida_hexrays.IPROP_OPTIONAL
"""
optional instruction
"""
IPROP_PERSIST = _ida_hexrays.IPROP_PERSIST
"""
persistent insn; they are not destroyed
"""
IPROP_WILDMATCH = _ida_hexrays.IPROP_WILDMATCH
"""
match multiple insns
"""
IPROP_CLNPOP = _ida_hexrays.IPROP_CLNPOP
"""
(e.g. "pop ecx" is often used for that)
the purpose of the instruction is to clean stack
"""
IPROP_FPINSN = _ida_hexrays.IPROP_FPINSN
"""
floating point insn
"""
IPROP_FARCALL = _ida_hexrays.IPROP_FARCALL
"""
call of a far function using push cs/call sequence
"""
IPROP_TAILCALL = _ida_hexrays.IPROP_TAILCALL
"""
tail call
"""
IPROP_ASSERT = _ida_hexrays.IPROP_ASSERT
"""
assertion: usually mov #val, op. assertions are used to help the
optimizer. assertions are ignored when generating ctree
"""
IPROP_SPLIT = _ida_hexrays.IPROP_SPLIT
"""
the instruction has been split:
"""
IPROP_SPLIT1 = _ida_hexrays.IPROP_SPLIT1
"""
into 1 byte
"""
IPROP_SPLIT2 = _ida_hexrays.IPROP_SPLIT2
"""
into 2 bytes
"""
IPROP_SPLIT4 = _ida_hexrays.IPROP_SPLIT4
"""
into 4 bytes
"""
IPROP_SPLIT8 = _ida_hexrays.IPROP_SPLIT8
"""
into 8 bytes
"""
IPROP_COMBINED = _ida_hexrays.IPROP_COMBINED
"""
insn has been modified because of a partial reference
"""
IPROP_EXTSTX = _ida_hexrays.IPROP_EXTSTX
"""
this is m_ext propagated into m_stx
"""
IPROP_IGNLOWSRC = _ida_hexrays.IPROP_IGNLOWSRC
"""
low part of the instruction source operand has been created
artificially (this bit is used only for 'and x, 80...')
"""
IPROP_INV_JX = _ida_hexrays.IPROP_INV_JX
"""
inverted conditional jump
"""
IPROP_WAS_NORET = _ida_hexrays.IPROP_WAS_NORET
"""
was noret icall
"""
IPROP_MULTI_MOV = _ida_hexrays.IPROP_MULTI_MOV
"""
(example: STM on ARM may transfer multiple registers)
the minsn was generated as part of insn that moves multiple
registersbits that can be set by plugins:
"""
IPROP_DONT_PROP = _ida_hexrays.IPROP_DONT_PROP
"""
may not propagate
"""
IPROP_DONT_COMB = _ida_hexrays.IPROP_DONT_COMB
"""
may not combine this instruction with others
"""
OPTI_ADDREXPRS = _ida_hexrays.OPTI_ADDREXPRS
"""
optimize all address expressions (&x+N; &x-&y)
"""
OPTI_MINSTKREF = _ida_hexrays.OPTI_MINSTKREF
"""
may update minstkref
"""
OPTI_COMBINSNS = _ida_hexrays.OPTI_COMBINSNS
"""
may combine insns (only for optimize_insn)
"""
OPTI_NO_LDXOPT = _ida_hexrays.OPTI_NO_LDXOPT
"""
do not optimize low/high(ldx)
"""
EQ_IGNSIZE = _ida_hexrays.EQ_IGNSIZE
"""
ignore operand sizes
"""
EQ_IGNCODE = _ida_hexrays.EQ_IGNCODE
"""
ignore instruction opcodes
"""
EQ_CMPDEST = _ida_hexrays.EQ_CMPDEST
"""
compare instruction destinations
"""
EQ_OPTINSN = _ida_hexrays.EQ_OPTINSN
"""
optimize mop_d operands
"""
def getf_reginsn(*args):
    """
    getf_reginsn(ins) -> minsn_t

    Skip assertions forward.

    @param ins (C++: const minsn_t *)
    """
    return _ida_hexrays.getf_reginsn(*args)
def getb_reginsn(*args):
    """
    getb_reginsn(ins) -> minsn_t

    Skip assertions backward.

    @param ins (C++: const minsn_t *)
    """
    return _ida_hexrays.getb_reginsn(*args)
BLT_NONE = _ida_hexrays.BLT_NONE
BLT_STOP = _ida_hexrays.BLT_STOP
BLT_0WAY = _ida_hexrays.BLT_0WAY
BLT_1WAY = _ida_hexrays.BLT_1WAY
BLT_2WAY = _ida_hexrays.BLT_2WAY
BLT_NWAY = _ida_hexrays.BLT_NWAY
BLT_XTRN = _ida_hexrays.BLT_XTRN
mblock_t_swigregister = _ida_hexrays.mblock_t_swigregister
mblock_t_swigregister(mblock_t)
MBL_PRIV = _ida_hexrays.MBL_PRIV
"""
the specified are accepted (used in patterns)
private block - no instructions except
"""
MBL_NONFAKE = _ida_hexrays.MBL_NONFAKE
"""
regular block
"""
MBL_FAKE = _ida_hexrays.MBL_FAKE
"""
fake block (after a tail call)
"""
MBL_GOTO = _ida_hexrays.MBL_GOTO
"""
this block is a goto target
"""
MBL_TCAL = _ida_hexrays.MBL_TCAL
"""
aritifical call block for tail calls
"""
MBL_PUSH = _ida_hexrays.MBL_PUSH
"""
needs "convert push/pop instructions"
"""
MBL_DMT64 = _ida_hexrays.MBL_DMT64
"""
needs "demote 64bits"
"""
MBL_COMB = _ida_hexrays.MBL_COMB
"""
needs "combine" pass
"""
MBL_PROP = _ida_hexrays.MBL_PROP
"""
needs 'propagation' pass
"""
MBL_DEAD = _ida_hexrays.MBL_DEAD
"""
needs "eliminate deads" pass
"""
MBL_LIST = _ida_hexrays.MBL_LIST
"""
use/def lists are ready (not dirty)
"""
MBL_INCONST = _ida_hexrays.MBL_INCONST
"""
inconsistent lists: we are building them
"""
MBL_CALL = _ida_hexrays.MBL_CALL
"""
call information has been built
"""
MBL_BACKPROP = _ida_hexrays.MBL_BACKPROP
"""
performed backprop_cc
"""
MBL_NORET = _ida_hexrays.MBL_NORET
"""
dead end block: doesn't return execution control
"""
MBL_DSLOT = _ida_hexrays.MBL_DSLOT
"""
block for delay slot
"""
MBL_VALRANGES = _ida_hexrays.MBL_VALRANGES
"""
should optimize using value ranges
"""
FD_BACKWARD = _ida_hexrays.FD_BACKWARD
"""
search direction
"""
FD_FORWARD = _ida_hexrays.FD_FORWARD
"""
search direction
"""
FD_USE = _ida_hexrays.FD_USE
"""
look for use
"""
FD_DEF = _ida_hexrays.FD_DEF
"""
look for definition
"""
FD_DIRTY = _ida_hexrays.FD_DIRTY
"""
by function calls and indirect memory access
ignore possible implicit definitions
"""
VR_AT_START = _ida_hexrays.VR_AT_START
"""
at the block start (if M is NULL)
get value ranges before the instruction or
"""
VR_AT_END = _ida_hexrays.VR_AT_END
"""
get value ranges after the instruction or at the block end, just after
the last instruction (if M is NULL)
"""
VR_EXACT = _ida_hexrays.VR_EXACT
"""
valrng size will be >= vivl.size
find exact match. if not set, the returned
"""
WARN_VARARG_REGS = _ida_hexrays.WARN_VARARG_REGS
WARN_ILL_PURGED = _ida_hexrays.WARN_ILL_PURGED
WARN_ILL_FUNCTYPE = _ida_hexrays.WARN_ILL_FUNCTYPE
WARN_VARARG_TCAL = _ida_hexrays.WARN_VARARG_TCAL
WARN_VARARG_NOSTK = _ida_hexrays.WARN_VARARG_NOSTK
WARN_VARARG_MANY = _ida_hexrays.WARN_VARARG_MANY
WARN_ADDR_OUTARGS = _ida_hexrays.WARN_ADDR_OUTARGS
WARN_DEP_UNK_CALLS = _ida_hexrays.WARN_DEP_UNK_CALLS
WARN_ILL_ELLIPSIS = _ida_hexrays.WARN_ILL_ELLIPSIS
WARN_GUESSED_TYPE = _ida_hexrays.WARN_GUESSED_TYPE
WARN_EXP_LINVAR = _ida_hexrays.WARN_EXP_LINVAR
WARN_WIDEN_CHAINS = _ida_hexrays.WARN_WIDEN_CHAINS
WARN_BAD_PURGED = _ida_hexrays.WARN_BAD_PURGED
WARN_CBUILD_LOOPS = _ida_hexrays.WARN_CBUILD_LOOPS
WARN_NO_SAVE_REST = _ida_hexrays.WARN_NO_SAVE_REST
WARN_ODD_INPUT_REG = _ida_hexrays.WARN_ODD_INPUT_REG
WARN_ODD_ADDR_USE = _ida_hexrays.WARN_ODD_ADDR_USE
WARN_MUST_RET_FP = _ida_hexrays.WARN_MUST_RET_FP
WARN_ILL_FPU_STACK = _ida_hexrays.WARN_ILL_FPU_STACK
WARN_SELFREF_PROP = _ida_hexrays.WARN_SELFREF_PROP
WARN_WOULD_OVERLAP = _ida_hexrays.WARN_WOULD_OVERLAP
WARN_ARRAY_INARG = _ida_hexrays.WARN_ARRAY_INARG
WARN_MAX_ARGS = _ida_hexrays.WARN_MAX_ARGS
WARN_BAD_FIELD_TYPE = _ida_hexrays.WARN_BAD_FIELD_TYPE
WARN_WRITE_CONST = _ida_hexrays.WARN_WRITE_CONST
WARN_BAD_RETVAR = _ida_hexrays.WARN_BAD_RETVAR
WARN_FRAG_LVAR = _ida_hexrays.WARN_FRAG_LVAR
WARN_HUGE_STKOFF = _ida_hexrays.WARN_HUGE_STKOFF
WARN_UNINITED_REG = _ida_hexrays.WARN_UNINITED_REG
WARN_FIXED_MACRO = _ida_hexrays.WARN_FIXED_MACRO
WARN_WRONG_VA_OFF = _ida_hexrays.WARN_WRONG_VA_OFF
WARN_CR_NOFIELD = _ida_hexrays.WARN_CR_NOFIELD
WARN_CR_BADOFF = _ida_hexrays.WARN_CR_BADOFF
WARN_BAD_STROFF = _ida_hexrays.WARN_BAD_STROFF
WARN_BAD_VARSIZE = _ida_hexrays.WARN_BAD_VARSIZE
WARN_UNSUPP_REG = _ida_hexrays.WARN_UNSUPP_REG
WARN_UNALIGNED_ARG = _ida_hexrays.WARN_UNALIGNED_ARG
WARN_BAD_STD_TYPE = _ida_hexrays.WARN_BAD_STD_TYPE
WARN_BAD_CALL_SP = _ida_hexrays.WARN_BAD_CALL_SP
WARN_MISSED_SWITCH = _ida_hexrays.WARN_MISSED_SWITCH
WARN_BAD_SP = _ida_hexrays.WARN_BAD_SP
WARN_BAD_STKPNT = _ida_hexrays.WARN_BAD_STKPNT
WARN_UNDEF_LVAR = _ida_hexrays.WARN_UNDEF_LVAR
WARN_JUMPOUT = _ida_hexrays.WARN_JUMPOUT
WARN_BAD_VALRNG = _ida_hexrays.WARN_BAD_VALRNG
WARN_BAD_SHADOW = _ida_hexrays.WARN_BAD_SHADOW
WARN_MAX = _ida_hexrays.WARN_MAX
hexwarn_t_swigregister = _ida_hexrays.hexwarn_t_swigregister
hexwarn_t_swigregister(hexwarn_t)
MMAT_ZERO = _ida_hexrays.MMAT_ZERO
MMAT_GENERATED = _ida_hexrays.MMAT_GENERATED
MMAT_PREOPTIMIZED = _ida_hexrays.MMAT_PREOPTIMIZED
MMAT_LOCOPT = _ida_hexrays.MMAT_LOCOPT
MMAT_CALLS = _ida_hexrays.MMAT_CALLS
MMAT_GLBOPT1 = _ida_hexrays.MMAT_GLBOPT1
MMAT_GLBOPT2 = _ida_hexrays.MMAT_GLBOPT2
MMAT_GLBOPT3 = _ida_hexrays.MMAT_GLBOPT3
MMAT_LVARS = _ida_hexrays.MMAT_LVARS
MMIDX_GLBLOW = _ida_hexrays.MMIDX_GLBLOW
MMIDX_LVARS = _ida_hexrays.MMIDX_LVARS
MMIDX_RETADDR = _ida_hexrays.MMIDX_RETADDR
MMIDX_SHADOW = _ida_hexrays.MMIDX_SHADOW
MMIDX_ARGS = _ida_hexrays.MMIDX_ARGS
MMIDX_GLBHIGH = _ida_hexrays.MMIDX_GLBHIGH
mba_ranges_t_swigregister = _ida_hexrays.mba_ranges_t_swigregister
mba_ranges_t_swigregister(mba_ranges_t)
mba_range_iterator_t_swigregister = _ida_hexrays.mba_range_iterator_t_swigregister
mba_range_iterator_t_swigregister(mba_range_iterator_t)
mbl_array_t_swigregister = _ida_hexrays.mbl_array_t_swigregister
mbl_array_t_swigregister(mbl_array_t)
MBA_PRCDEFS = _ida_hexrays.MBA_PRCDEFS
"""
use precise defeas for chain-allocated lvars
"""
MBA_NOFUNC = _ida_hexrays.MBA_NOFUNC
"""
function is not present, addresses might be wrong
"""
MBA_PATTERN = _ida_hexrays.MBA_PATTERN
"""
microcode pattern, callinfo is present
"""
MBA_LOADED = _ida_hexrays.MBA_LOADED
"""
loaded gdl, no instructions (debugging)
"""
MBA_RETFP = _ida_hexrays.MBA_RETFP
"""
function returns floating point value
"""
MBA_SPLINFO = _ida_hexrays.MBA_SPLINFO
"""
(final_type ? idb_spoiled : spoiled_regs) is valid
"""
MBA_PASSREGS = _ida_hexrays.MBA_PASSREGS
"""
has 'mcallinfo_t::pass_regs'
"""
MBA_THUNK = _ida_hexrays.MBA_THUNK
"""
thunk function
"""
MBA_CMNSTK = _ida_hexrays.MBA_CMNSTK
"""
stkvars+stkargs should be considered as one area
"""
MBA_PREOPT = _ida_hexrays.MBA_PREOPT
"""
preoptimization stage complete
"""
MBA_CMBBLK = _ida_hexrays.MBA_CMBBLK
"""
request to combine blocks
"""
MBA_ASRTOK = _ida_hexrays.MBA_ASRTOK
"""
assertions have been generated
"""
MBA_CALLS = _ida_hexrays.MBA_CALLS
"""
callinfo has been built
"""
MBA_ASRPROP = _ida_hexrays.MBA_ASRPROP
"""
assertion have been propagated
"""
MBA_SAVRST = _ida_hexrays.MBA_SAVRST
"""
save-restore analysis has been performed
"""
MBA_RETREF = _ida_hexrays.MBA_RETREF
"""
return type has been refined
"""
MBA_GLBOPT = _ida_hexrays.MBA_GLBOPT
"""
microcode has been optimized globally
"""
MBA_OVERVAR = _ida_hexrays.MBA_OVERVAR
"""
an overlapped variable has been detected
"""
MBA_LVARS0 = _ida_hexrays.MBA_LVARS0
"""
lvar pre-allocation has been performed
"""
MBA_LVARS1 = _ida_hexrays.MBA_LVARS1
"""
lvar real allocation has been performed
"""
MBA_DELPAIRS = _ida_hexrays.MBA_DELPAIRS
"""
pairs have been deleted once
"""
MBA_CHVARS = _ida_hexrays.MBA_CHVARS
"""
can verify chain varnums
"""
MBA_SHORT = _ida_hexrays.MBA_SHORT
"""
use short display
"""
MBA_COLGDL = _ida_hexrays.MBA_COLGDL
"""
display graph after each reduction
"""
MBA_INSGDL = _ida_hexrays.MBA_INSGDL
"""
display instruction in graphs
"""
MBA_NICE = _ida_hexrays.MBA_NICE
"""
apply transformations to c code
"""
MBA_REFINE = _ida_hexrays.MBA_REFINE
"""
may refine return value size
"""
MBA_RESERVED = _ida_hexrays.MBA_RESERVED
MBA_WINGR32 = _ida_hexrays.MBA_WINGR32
"""
use wingraph32
"""
MBA_NUMADDR = _ida_hexrays.MBA_NUMADDR
"""
display definition addresses for numbers
"""
MBA_VALNUM = _ida_hexrays.MBA_VALNUM
"""
display value numbers
"""
MBA_INITIAL_FLAGS = _ida_hexrays.MBA_INITIAL_FLAGS
MBA2_LVARNAMES_OK = _ida_hexrays.MBA2_LVARNAMES_OK
MBA2_LVARS_RENAMED = _ida_hexrays.MBA2_LVARS_RENAMED
MBA2_OVER_CHAINS = _ida_hexrays.MBA2_OVER_CHAINS
MBA2_VALRNG_DONE = _ida_hexrays.MBA2_VALRNG_DONE
MBA2_IS_CTR = _ida_hexrays.MBA2_IS_CTR
MBA2_IS_DTR = _ida_hexrays.MBA2_IS_DTR
MBA2_ARGIDX_OK = _ida_hexrays.MBA2_ARGIDX_OK
MBA2_NO_DUP_CALLS = _ida_hexrays.MBA2_NO_DUP_CALLS
MBA2_NO_DUP_LVARS = _ida_hexrays.MBA2_NO_DUP_LVARS
MBA2_INITIAL_FLAGS = _ida_hexrays.MBA2_INITIAL_FLAGS
MBA2_ALL_FLAGS = _ida_hexrays.MBA2_ALL_FLAGS
NALT_VD = _ida_hexrays.NALT_VD
"""
this index is not used by ida
"""
LOCOPT_ALL = _ida_hexrays.LOCOPT_ALL
"""
is not set, only dirty blocks will be optimized
redo optimization for all blocks. if this bit
"""
LOCOPT_REFINE = _ida_hexrays.LOCOPT_REFINE
"""
refine return type, ok to fail
"""
LOCOPT_REFINE2 = _ida_hexrays.LOCOPT_REFINE2
"""
refine return type, try harder
"""
ACFL_LOCOPT = _ida_hexrays.ACFL_LOCOPT
"""
perform local propagation (requires ACFL_BLKOPT)
"""
ACFL_BLKOPT = _ida_hexrays.ACFL_BLKOPT
"""
perform interblock transformations
"""
ACFL_GLBPROP = _ida_hexrays.ACFL_GLBPROP
"""
perform global propagation
"""
ACFL_GLBDEL = _ida_hexrays.ACFL_GLBDEL
"""
perform dead code eliminition
"""
ACFL_GUESS = _ida_hexrays.ACFL_GUESS
"""
may guess calling conventions
"""
CPBLK_FAST = _ida_hexrays.CPBLK_FAST
"""
do not update minbstkref and minbargref
"""
CPBLK_MINREF = _ida_hexrays.CPBLK_MINREF
"""
update minbstkref and minbargref
"""
CPBLK_OPTJMP = _ida_hexrays.CPBLK_OPTJMP
"""
if it becomes useless
del the jump insn at the end of the block
"""
def mbl_array_t_deserialize(*args):
    """
    mbl_array_t_deserialize(bytes, nbytes) -> mbl_array_t
    """
    # Thin SWIG wrapper around the native deserialization routine.
    return _ida_hexrays.mbl_array_t_deserialize(*args)
chain_keeper_t_swigregister = _ida_hexrays.chain_keeper_t_swigregister
chain_keeper_t_swigregister(chain_keeper_t)
GC_REGS_AND_STKVARS = _ida_hexrays.GC_REGS_AND_STKVARS
GC_ASR = _ida_hexrays.GC_ASR
GC_XDSU = _ida_hexrays.GC_XDSU
GC_END = _ida_hexrays.GC_END
GC_DIRTY_ALL = _ida_hexrays.GC_DIRTY_ALL
mbl_graph_t_swigregister = _ida_hexrays.mbl_graph_t_swigregister
mbl_graph_t_swigregister(mbl_graph_t)
codegen_t_swigregister = _ida_hexrays.codegen_t_swigregister
codegen_t_swigregister(codegen_t)
def is_kreg(*args):
    """
    is_kreg(r) -> bool

    Is a kernel register?

    @param r (C++: mreg_t)
    """
    return _ida_hexrays.is_kreg(*args)
def get_temp_regs(*args):
    """
    get_temp_regs() -> mlist_t

    Get list of temporary registers. Tempregs are temporary registers that
    are used during code generation. They do not map to regular processor
    registers. They are used only to store temporary values during
    execution of one instruction. Tempregs may not be used to pass a value
    from one block to another. In other words, at the end of a block all
    tempregs must be dead.
    """
    return _ida_hexrays.get_temp_regs(*args)
def get_hexrays_version(*args):
    """
    get_hexrays_version() -> char const *

    Get decompiler version. The returned string is of the form
    <major>.<minor>.<revision>.<build-date>

    @return: pointer to version string. For example: "2.0.0.140605"
    """
    return _ida_hexrays.get_hexrays_version(*args)
def checkout_hexrays_license(*args):
    """
    checkout_hexrays_license(silent) -> bool

    Check out a floating decompiler license. This function will display a
    dialog box if the license is not available. For non-floating licenses
    this function is effectively no-op. It is not necessary to call this
    function before decompiling. If the license was not checked out, the
    decompiler will automatically do it. This function can be used to
    check out a license in advance and ensure that a license is available.

    @param silent: silently fail if the license can not be checked out.
        (C++: bool)
    @return: false if failed
    """
    return _ida_hexrays.checkout_hexrays_license(*args)
def open_pseudocode(*args):
    """
    open_pseudocode(ea, new_window) -> vdui_t

    Open pseudocode window. The specified function is decompiled and the
    pseudocode window is opened.

    @param ea: function to decompile (C++: ea_t)
    @param new_window: 0:reuse existing window; 1:open new window; -1:
        reuse existing window if the current view is
        pseudocode (C++: int)
    @return: false if failed
    """
    # NOTE(review): the signature says this returns a vdui_t while the
    # generated doc says "false if failed" -- presumably a null/None result
    # signals failure; confirm against the Hex-Rays SDK documentation.
    return _ida_hexrays.open_pseudocode(*args)
def close_pseudocode(*args):
    """
    close_pseudocode(f) -> bool

    Close pseudocode window.

    @param f: pointer to window (C++: TWidget *)
    @return: false if failed
    """
    return _ida_hexrays.close_pseudocode(*args)
VDRUN_NEWFILE = _ida_hexrays.VDRUN_NEWFILE
"""
Create a new file or overwrite existing file.
"""
VDRUN_APPEND = _ida_hexrays.VDRUN_APPEND
"""
Create a new file or append to existing file.
"""
VDRUN_ONLYNEW = _ida_hexrays.VDRUN_ONLYNEW
"""
Fail if output file already exists.
"""
VDRUN_SILENT = _ida_hexrays.VDRUN_SILENT
"""
Silent decompilation.
"""
VDRUN_SENDIDB = _ida_hexrays.VDRUN_SENDIDB
"""
Send problematic databases to hex-rays.com.
"""
VDRUN_MAYSTOP = _ida_hexrays.VDRUN_MAYSTOP
"""
the user can cancel decompilation
"""
VDRUN_CMDLINE = _ida_hexrays.VDRUN_CMDLINE
"""
called from ida's command line
"""
VDRUN_STATS = _ida_hexrays.VDRUN_STATS
"""
print statistics into vd_stats.txt
"""
VDRUN_LUMINA = _ida_hexrays.VDRUN_LUMINA
"""
use lumina server
"""
def decompile_many(*args):
    """
    decompile_many(outfile, funcaddrs, flags) -> bool

    Batch decompilation. Decompile all or the specified functions.

    @param outfile: name of the output file (C++: const char *)
    @param funcaddrs: list of functions to decompile. If NULL or empty,
        then decompile all nonlib functions (C++: eavec_t *)
    @param flags: Batch decompilation bits (C++: int)
    @return: true if no internal error occurred and the user has not
        cancelled decompilation
    """
    return _ida_hexrays.decompile_many(*args)
hexrays_failure_t_swigregister = _ida_hexrays.hexrays_failure_t_swigregister
hexrays_failure_t_swigregister(hexrays_failure_t)
vd_failure_t_swigregister = _ida_hexrays.vd_failure_t_swigregister
vd_failure_t_swigregister(vd_failure_t)
vd_interr_t_swigregister = _ida_hexrays.vd_interr_t_swigregister
vd_interr_t_swigregister(vd_interr_t)
def send_database(*args):
    """
    send_database(err, silent)

    Send the database to Hex-Rays. This function sends the current
    database to the Hex-Rays server. The database is sent in the
    compressed form over an encrypted (SSL) connection.

    @param err: failure description object. Empty hexrays_failure_t
        object can be used if error information is not available.
        (C++: const hexrays_failure_t &)
    @param silent: if false, a dialog box will be displayed before sending
        the database. (C++: bool)
    """
    return _ida_hexrays.send_database(*args)
gco_info_t_swigregister = _ida_hexrays.gco_info_t_swigregister
gco_info_t_swigregister(gco_info_t)
GCO_STK = _ida_hexrays.GCO_STK
"""
a stack variable
"""
GCO_REG = _ida_hexrays.GCO_REG
"""
is register? otherwise a stack variable
"""
GCO_USE = _ida_hexrays.GCO_USE
"""
is source operand?
"""
GCO_DEF = _ida_hexrays.GCO_DEF
"""
is destination operand?
"""
def get_current_operand(*args):
    """
    get_current_operand(out) -> bool

    Get the instruction operand under the cursor. This function determines
    the operand that is under the cursor in the active disassembly
    listing. If the operand refers to a register or stack variable, it
    returns true.

    @param out (C++: gco_info_t *)
    """
    return _ida_hexrays.get_current_operand(*args)
def remitem(*args):
    """
    remitem(e)
    """
    # Thin SWIG wrapper; the semantics are defined by the native
    # _ida_hexrays extension module.
    return _ida_hexrays.remitem(*args)
cot_empty = _ida_hexrays.cot_empty
cot_comma = _ida_hexrays.cot_comma
cot_asg = _ida_hexrays.cot_asg
cot_asgbor = _ida_hexrays.cot_asgbor
cot_asgxor = _ida_hexrays.cot_asgxor
cot_asgband = _ida_hexrays.cot_asgband
cot_asgadd = _ida_hexrays.cot_asgadd
cot_asgsub = _ida_hexrays.cot_asgsub
cot_asgmul = _ida_hexrays.cot_asgmul
cot_asgsshr = _ida_hexrays.cot_asgsshr
cot_asgushr = _ida_hexrays.cot_asgushr
cot_asgshl = _ida_hexrays.cot_asgshl
cot_asgsdiv = _ida_hexrays.cot_asgsdiv
cot_asgudiv = _ida_hexrays.cot_asgudiv
cot_asgsmod = _ida_hexrays.cot_asgsmod
cot_asgumod = _ida_hexrays.cot_asgumod
cot_tern = _ida_hexrays.cot_tern
cot_lor = _ida_hexrays.cot_lor
cot_land = _ida_hexrays.cot_land
cot_bor = _ida_hexrays.cot_bor
cot_xor = _ida_hexrays.cot_xor
cot_band = _ida_hexrays.cot_band
cot_eq = _ida_hexrays.cot_eq
cot_ne = _ida_hexrays.cot_ne
cot_sge = _ida_hexrays.cot_sge
cot_uge = _ida_hexrays.cot_uge
cot_sle = _ida_hexrays.cot_sle
cot_ule = _ida_hexrays.cot_ule
cot_sgt = _ida_hexrays.cot_sgt
cot_ugt = _ida_hexrays.cot_ugt
cot_slt = _ida_hexrays.cot_slt
cot_ult = _ida_hexrays.cot_ult
cot_sshr = _ida_hexrays.cot_sshr
cot_ushr = _ida_hexrays.cot_ushr
cot_shl = _ida_hexrays.cot_shl
cot_add = _ida_hexrays.cot_add
cot_sub = _ida_hexrays.cot_sub
cot_mul = _ida_hexrays.cot_mul
cot_sdiv = _ida_hexrays.cot_sdiv
cot_udiv = _ida_hexrays.cot_udiv
cot_smod = _ida_hexrays.cot_smod
cot_umod = _ida_hexrays.cot_umod
cot_fadd = _ida_hexrays.cot_fadd
cot_fsub = _ida_hexrays.cot_fsub
cot_fmul = _ida_hexrays.cot_fmul
cot_fdiv = _ida_hexrays.cot_fdiv
cot_fneg = _ida_hexrays.cot_fneg
cot_neg = _ida_hexrays.cot_neg
cot_cast = _ida_hexrays.cot_cast
cot_lnot = _ida_hexrays.cot_lnot
cot_bnot = _ida_hexrays.cot_bnot
cot_ptr = _ida_hexrays.cot_ptr
cot_ref = _ida_hexrays.cot_ref
cot_postinc = _ida_hexrays.cot_postinc
cot_postdec = _ida_hexrays.cot_postdec
cot_preinc = _ida_hexrays.cot_preinc
cot_predec = _ida_hexrays.cot_predec
cot_call = _ida_hexrays.cot_call
cot_idx = _ida_hexrays.cot_idx
cot_memref = _ida_hexrays.cot_memref
cot_memptr = _ida_hexrays.cot_memptr
cot_num = _ida_hexrays.cot_num
cot_fnum = _ida_hexrays.cot_fnum
cot_str = _ida_hexrays.cot_str
cot_obj = _ida_hexrays.cot_obj
cot_var = _ida_hexrays.cot_var
cot_insn = _ida_hexrays.cot_insn
cot_sizeof = _ida_hexrays.cot_sizeof
cot_helper = _ida_hexrays.cot_helper
cot_type = _ida_hexrays.cot_type
cot_last = _ida_hexrays.cot_last
cit_empty = _ida_hexrays.cit_empty
cit_block = _ida_hexrays.cit_block
cit_expr = _ida_hexrays.cit_expr
cit_if = _ida_hexrays.cit_if
cit_for = _ida_hexrays.cit_for
cit_while = _ida_hexrays.cit_while
cit_do = _ida_hexrays.cit_do
cit_switch = _ida_hexrays.cit_switch
cit_break = _ida_hexrays.cit_break
cit_continue = _ida_hexrays.cit_continue
cit_return = _ida_hexrays.cit_return
cit_goto = _ida_hexrays.cit_goto
cit_asm = _ida_hexrays.cit_asm
cit_end = _ida_hexrays.cit_end
operator_info_t_swigregister = _ida_hexrays.operator_info_t_swigregister
operator_info_t_swigregister(operator_info_t)
FX_NONE = cvar.FX_NONE
FX_INFIX = cvar.FX_INFIX
FX_PREFIX = cvar.FX_PREFIX
FX_POSTFIX = cvar.FX_POSTFIX
FX_TERNARY = cvar.FX_TERNARY
COI_RL = cvar.COI_RL
COI_LR = cvar.COI_LR
COI_INT = cvar.COI_INT
COI_FP = cvar.COI_FP
COI_SH = cvar.COI_SH
COI_SGN = cvar.COI_SGN
COI_SBN = cvar.COI_SBN
def negated_relation(*args):
    """
    negated_relation(op) -> ctype_t

    Negate a comparison operator. For example, cot_sge becomes cot_slt.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.negated_relation(*args)
def swapped_relation(*args):
    """
    swapped_relation(op) -> ctype_t

    Swap a comparison operator. For example, cot_sge becomes cot_sle.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.swapped_relation(*args)
def get_op_signness(*args):
    """
    get_op_signness(op) -> type_sign_t

    Get operator sign. Meaningful for sign-dependent operators, like
    cot_sdiv.

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.get_op_signness(*args)
def asgop(*args):
    """
    asgop(cop) -> ctype_t

    Convert plain operator into assignment operator. For example, cot_add
    returns cot_asgadd.

    @param cop (C++: ctype_t)
    """
    return _ida_hexrays.asgop(*args)
def asgop_revert(*args):
    """
    asgop_revert(cop) -> ctype_t

    Convert assignment operator into plain operator. For example,
    cot_asgadd returns cot_add.

    @param cop (C++: ctype_t)
    @return: cot_empty if the input operator is not an assignment
        operator.
    """
    return _ida_hexrays.asgop_revert(*args)
def op_uses_x(*args):
    """
    op_uses_x(op) -> bool

    Does operator use the 'x' field of 'cexpr_t' ?

    @param op (C++: ctype_t)
    """
    return _ida_hexrays.op_uses_x(*args)
def op_uses_y(*args):
"""
op_uses_y(op) -> bool
Does operator use the 'y' field of 'cexpr_t' ?
@param op (C++: ctype_t)
"""
return _ida_hexrays.op_uses_y(*args)
def op_uses_z(*args):
"""
op_uses_z(op) -> bool
Does operator use the 'z' field of 'cexpr_t' ?
@param op (C++: ctype_t)
"""
return _ida_hexrays.op_uses_z(*args)
def is_binary(*args):
"""
is_binary(op) -> bool
Is binary operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_binary(*args)
def is_unary(*args):
"""
is_unary(op) -> bool
Is unary operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_unary(*args)
def is_relational(*args):
"""
is_relational(op) -> bool
Is comparison operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_relational(*args)
def is_assignment(*args):
"""
is_assignment(op) -> bool
Is assignment operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_assignment(*args)
def accepts_udts(*args):
"""
accepts_udts(op) -> bool
"""
return _ida_hexrays.accepts_udts(*args)
def is_prepost(*args):
"""
is_prepost(op) -> bool
Is pre/post increment/decrement operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_prepost(*args)
def is_commutative(*args):
"""
is_commutative(op) -> bool
Is commutative operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_commutative(*args)
def is_additive(*args):
"""
is_additive(op) -> bool
Is additive operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_additive(*args)
def is_multiplicative(*args):
"""
is_multiplicative(op) -> bool
Is multiplicative operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_multiplicative(*args)
def is_bitop(*args):
"""
is_bitop(op) -> bool
Is bit related operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_bitop(*args)
def is_logical(*args):
"""
is_logical(op) -> bool
Is logical operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_logical(*args)
def is_loop(*args):
"""
is_loop(op) -> bool
Is loop statement code?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_loop(*args)
def is_break_consumer(*args):
"""
is_break_consumer(op) -> bool
Does a break statement influence the specified statement code?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_break_consumer(*args)
def is_lvalue(*args):
"""
is_lvalue(op) -> bool
Is Lvalue operator?
@param op (C++: ctype_t)
"""
return _ida_hexrays.is_lvalue(*args)
def accepts_small_udts(*args):
"""
accepts_small_udts(op) -> bool
Is the operator allowed on small structure or union?
@param op (C++: ctype_t)
"""
return _ida_hexrays.accepts_small_udts(*args)
# --- SWIG proxy class registrations and ctree-related constants. ---
cnumber_t_swigregister = _ida_hexrays.cnumber_t_swigregister
cnumber_t_swigregister(cnumber_t)
var_ref_t_swigregister = _ida_hexrays.var_ref_t_swigregister
var_ref_t_swigregister(var_ref_t)
ctree_visitor_t_swigregister = _ida_hexrays.ctree_visitor_t_swigregister
ctree_visitor_t_swigregister(ctree_visitor_t)
# ctree_visitor_t behavior flags (CV_*).
CV_FAST = _ida_hexrays.CV_FAST
"""
do not maintain parent information
"""
CV_PRUNE = _ida_hexrays.CV_PRUNE
"""
this bit is set by visit...() to prune the walk
"""
CV_PARENTS = _ida_hexrays.CV_PARENTS
"""
maintain parent information
"""
CV_POST = _ida_hexrays.CV_POST
"""
call the leave...() functions
"""
CV_RESTART = _ida_hexrays.CV_RESTART
"""
restart enumeration at the top expr (apply_to_exprs)
"""
CV_INSNS = _ida_hexrays.CV_INSNS
"""
visit only statements, prune all expressions do not use before the
final ctree maturity because expressions may contain statements at
intermediate stages (see cot_insn). Otherwise you risk missing
statements embedded into expressions.
"""
ctree_parentee_t_swigregister = _ida_hexrays.ctree_parentee_t_swigregister
ctree_parentee_t_swigregister(ctree_parentee_t)
cfunc_parentee_t_swigregister = _ida_hexrays.cfunc_parentee_t_swigregister
cfunc_parentee_t_swigregister(cfunc_parentee_t)
# Ctree maturity levels (CMAT_*), from freshly built to final.
CMAT_ZERO = _ida_hexrays.CMAT_ZERO
CMAT_BUILT = _ida_hexrays.CMAT_BUILT
CMAT_TRANS1 = _ida_hexrays.CMAT_TRANS1
CMAT_NICE = _ida_hexrays.CMAT_NICE
CMAT_TRANS2 = _ida_hexrays.CMAT_TRANS2
CMAT_CPA = _ida_hexrays.CMAT_CPA
CMAT_TRANS3 = _ida_hexrays.CMAT_TRANS3
CMAT_CASTED = _ida_hexrays.CMAT_CASTED
CMAT_FINAL = _ida_hexrays.CMAT_FINAL
# Item type precisers (ITP_*): identify comment anchor positions.
ITP_EMPTY = _ida_hexrays.ITP_EMPTY
ITP_ARG1 = _ida_hexrays.ITP_ARG1
ITP_ARG64 = _ida_hexrays.ITP_ARG64
ITP_BRACE1 = _ida_hexrays.ITP_BRACE1
ITP_INNER_LAST = _ida_hexrays.ITP_INNER_LAST
ITP_ASM = _ida_hexrays.ITP_ASM
ITP_ELSE = _ida_hexrays.ITP_ELSE
ITP_DO = _ida_hexrays.ITP_DO
ITP_SEMI = _ida_hexrays.ITP_SEMI
ITP_CURLY1 = _ida_hexrays.ITP_CURLY1
ITP_CURLY2 = _ida_hexrays.ITP_CURLY2
ITP_BRACE2 = _ida_hexrays.ITP_BRACE2
ITP_COLON = _ida_hexrays.ITP_COLON
ITP_BLOCK1 = _ida_hexrays.ITP_BLOCK1
ITP_BLOCK2 = _ida_hexrays.ITP_BLOCK2
ITP_CASE = _ida_hexrays.ITP_CASE
ITP_SIGN = _ida_hexrays.ITP_SIGN
treeloc_t_swigregister = _ida_hexrays.treeloc_t_swigregister
treeloc_t_swigregister(treeloc_t)
# Comment retrieval modes.
RETRIEVE_ONCE = _ida_hexrays.RETRIEVE_ONCE
RETRIEVE_ALWAYS = _ida_hexrays.RETRIEVE_ALWAYS
citem_cmt_t_swigregister = _ida_hexrays.citem_cmt_t_swigregister
citem_cmt_t_swigregister(citem_cmt_t)
citem_locator_t_swigregister = _ida_hexrays.citem_locator_t_swigregister
citem_locator_t_swigregister(citem_locator_t)
bit_bound_t_swigregister = _ida_hexrays.bit_bound_t_swigregister
bit_bound_t_swigregister(bit_bound_t)
citem_t_swigregister = _ida_hexrays.citem_t_swigregister
citem_t_swigregister(citem_t)
cexpr_t_swigregister = _ida_hexrays.cexpr_t_swigregister
cexpr_t_swigregister(cexpr_t)
# Expression flags (EXFL_*), stored in cexpr_t.
EXFL_CPADONE = _ida_hexrays.EXFL_CPADONE
"""
pointer arithmetic correction done
"""
EXFL_LVALUE = _ida_hexrays.EXFL_LVALUE
"""
expression is lvalue even if it doesn't look like it
"""
EXFL_FPOP = _ida_hexrays.EXFL_FPOP
"""
floating point operation
"""
EXFL_ALONE = _ida_hexrays.EXFL_ALONE
"""
standalone helper
"""
EXFL_CSTR = _ida_hexrays.EXFL_CSTR
"""
string literal
"""
EXFL_PARTIAL = _ida_hexrays.EXFL_PARTIAL
"""
type of the expression is considered partial
"""
EXFL_UNDEF = _ida_hexrays.EXFL_UNDEF
"""
expression uses undefined value
"""
EXFL_JUMPOUT = _ida_hexrays.EXFL_JUMPOUT
"""
jump out-of-function
"""
EXFL_VFTABLE = _ida_hexrays.EXFL_VFTABLE
"""
is ptr to vftable (used for cot_memptr, cot_memref)
"""
EXFL_ALL = _ida_hexrays.EXFL_ALL
"""
all currently defined bits
"""
ceinsn_t_swigregister = _ida_hexrays.ceinsn_t_swigregister
ceinsn_t_swigregister(ceinsn_t)
# Curly-brace usage policy constants.
CALC_CURLY_BRACES = _ida_hexrays.CALC_CURLY_BRACES
NO_CURLY_BRACES = _ida_hexrays.NO_CURLY_BRACES
USE_CURLY_BRACES = _ida_hexrays.USE_CURLY_BRACES
# Statement proxy class registrations (if/loops/return/goto/asm/...).
cif_t_swigregister = _ida_hexrays.cif_t_swigregister
cif_t_swigregister(cif_t)
cloop_t_swigregister = _ida_hexrays.cloop_t_swigregister
cloop_t_swigregister(cloop_t)
cfor_t_swigregister = _ida_hexrays.cfor_t_swigregister
cfor_t_swigregister(cfor_t)
cwhile_t_swigregister = _ida_hexrays.cwhile_t_swigregister
cwhile_t_swigregister(cwhile_t)
cdo_t_swigregister = _ida_hexrays.cdo_t_swigregister
cdo_t_swigregister(cdo_t)
creturn_t_swigregister = _ida_hexrays.creturn_t_swigregister
creturn_t_swigregister(creturn_t)
cgoto_t_swigregister = _ida_hexrays.cgoto_t_swigregister
cgoto_t_swigregister(cgoto_t)
casm_t_swigregister = _ida_hexrays.casm_t_swigregister
casm_t_swigregister(casm_t)
cinsn_t_swigregister = _ida_hexrays.cinsn_t_swigregister
cinsn_t_swigregister(cinsn_t)
# SWIG forwarder for the static method cinsn_t.insn_is_epilog.
def cinsn_t_insn_is_epilog(*args):
    """
    cinsn_t_insn_is_epilog(insn) -> bool
    """
    return _ida_hexrays.cinsn_t_insn_is_epilog(*args)
cblock_t_swigregister = _ida_hexrays.cblock_t_swigregister
cblock_t_swigregister(cblock_t)
carg_t_swigregister = _ida_hexrays.carg_t_swigregister
carg_t_swigregister(carg_t)
carglist_t_swigregister = _ida_hexrays.carglist_t_swigregister
carglist_t_swigregister(carglist_t)
# Call flags (CFL_*) used by carglist_t.
CFL_FINAL = _ida_hexrays.CFL_FINAL
"""
call type is final, should not be changed
"""
CFL_HELPER = _ida_hexrays.CFL_HELPER
"""
created from a decompiler helper function
"""
ccase_t_swigregister = _ida_hexrays.ccase_t_swigregister
ccase_t_swigregister(ccase_t)
ccases_t_swigregister = _ida_hexrays.ccases_t_swigregister
ccases_t_swigregister(ccases_t)
cswitch_t_swigregister = _ida_hexrays.cswitch_t_swigregister
cswitch_t_swigregister(cswitch_t)
ctree_anchor_t_swigregister = _ida_hexrays.ctree_anchor_t_swigregister
ctree_anchor_t_swigregister(ctree_anchor_t)
# Ctree anchor (ANCHOR_*) constants.
ANCHOR_INDEX = _ida_hexrays.ANCHOR_INDEX
ANCHOR_MASK = _ida_hexrays.ANCHOR_MASK
ANCHOR_CITEM = _ida_hexrays.ANCHOR_CITEM
"""
c-tree item
"""
ANCHOR_LVAR = _ida_hexrays.ANCHOR_LVAR
"""
declaration of local variable
"""
ANCHOR_ITP = _ida_hexrays.ANCHOR_ITP
"""
item type preciser
"""
ANCHOR_BLKCMT = _ida_hexrays.ANCHOR_BLKCMT
"""
block comment (for ctree items)
"""
# ctree_item_t kinds (VDI_*).
VDI_NONE = _ida_hexrays.VDI_NONE
VDI_EXPR = _ida_hexrays.VDI_EXPR
VDI_LVAR = _ida_hexrays.VDI_LVAR
VDI_FUNC = _ida_hexrays.VDI_FUNC
VDI_TAIL = _ida_hexrays.VDI_TAIL
ctree_item_t_swigregister = _ida_hexrays.ctree_item_t_swigregister
ctree_item_t_swigregister(ctree_item_t)
# Label retrieval modes (GLN_*).
GLN_CURRENT = _ida_hexrays.GLN_CURRENT
"""
get label of the current item
"""
GLN_GOTO_TARGET = _ida_hexrays.GLN_GOTO_TARGET
"""
get goto target
"""
GLN_ALL = _ida_hexrays.GLN_ALL
"""
get both
"""
# Unused-label policy constants.
FORBID_UNUSED_LABELS = _ida_hexrays.FORBID_UNUSED_LABELS
ALLOW_UNUSED_LABELS = _ida_hexrays.ALLOW_UNUSED_LABELS
# --- Low-level ctree construction forwarders (_ll_*). ---
# Internal building blocks; higher-level wrappers elsewhere in the module
# are the intended public entry points.
def _ll_lnot(*args):
    """
    _ll_lnot(e) -> cexpr_t
    """
    return _ida_hexrays._ll_lnot(*args)
def _ll_new_block(*args):
    """
    _ll_new_block() -> cinsn_t
    """
    return _ida_hexrays._ll_new_block(*args)
def _ll_create_helper(*args):
    """
    _ll_create_helper(standalone, type, format) -> cexpr_t
    """
    return _ida_hexrays._ll_create_helper(*args)
def _ll_call_helper(*args):
    """
    _ll_call_helper(rettype, args, format) -> cexpr_t
    """
    return _ida_hexrays._ll_call_helper(*args)
def _ll_make_num(*args):
    """
    _ll_make_num(n, func=None, ea=BADADDR, opnum=0, sign=no_sign, size=0) -> cexpr_t
    """
    return _ida_hexrays._ll_make_num(*args)
def _ll_make_ref(*args):
    """
    _ll_make_ref(e) -> cexpr_t
    """
    return _ida_hexrays._ll_make_ref(*args)
def _ll_dereference(*args):
    """
    _ll_dereference(e, ptrsize, is_flt=False) -> cexpr_t
    """
    return _ida_hexrays._ll_dereference(*args)
# --- Database persistence forwarders for user-defined decompiler data. ---
# save_user_* store collections into the IDB; restore_user_* load them back.
def save_user_labels(*args):
    """
    save_user_labels(func_ea, user_labels)
    Save user defined labels into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param user_labels: collection of user defined labels (C++: const
        user_labels_t *)
    """
    return _ida_hexrays.save_user_labels(*args)
def save_user_cmts(*args):
    """
    save_user_cmts(func_ea, user_cmts)
    Save user defined comments into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param user_cmts: collection of user defined comments (C++: const
        user_cmts_t *)
    """
    return _ida_hexrays.save_user_cmts(*args)
def save_user_numforms(*args):
    """
    save_user_numforms(func_ea, numforms)
    Save user defined number formats into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param numforms: collection of user defined number formats (C++: const
        user_numforms_t *)
    """
    return _ida_hexrays.save_user_numforms(*args)
def save_user_iflags(*args):
    """
    save_user_iflags(func_ea, iflags)
    Save user defined citem iflags into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param iflags: collection of user defined citem iflags (C++: const
        user_iflags_t *)
    """
    return _ida_hexrays.save_user_iflags(*args)
def save_user_unions(*args):
    """
    save_user_unions(func_ea, unions)
    Save user defined union field selections into the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @param unions: collection of union field selections (C++: const
        user_unions_t *)
    """
    return _ida_hexrays.save_user_unions(*args)
def restore_user_labels(*args):
    """
    restore_user_labels(func_ea) -> user_labels_t
    Restore user defined labels from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined labels. The returned object must
        be deleted by the caller using delete_user_labels()
    """
    return _ida_hexrays.restore_user_labels(*args)
def restore_user_cmts(*args):
    """
    restore_user_cmts(func_ea) -> user_cmts_t
    Restore user defined comments from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined comments. The returned object must
        be deleted by the caller using delete_user_cmts()
    """
    return _ida_hexrays.restore_user_cmts(*args)
def restore_user_numforms(*args):
    """
    restore_user_numforms(func_ea) -> user_numforms_t
    Restore user defined number formats from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined number formats. The returned
        object must be deleted by the caller using
        delete_user_numforms()
    """
    return _ida_hexrays.restore_user_numforms(*args)
def restore_user_iflags(*args):
    """
    restore_user_iflags(func_ea) -> user_iflags_t
    Restore user defined citem iflags from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of user defined iflags. The returned object must
        be deleted by the caller using delete_user_iflags()
    """
    return _ida_hexrays.restore_user_iflags(*args)
def restore_user_unions(*args):
    """
    restore_user_unions(func_ea) -> user_unions_t
    Restore user defined union field selections from the database.
    @param func_ea: the entry address of the function (C++: ea_t)
    @return: collection of union field selections The returned object must
        be deleted by the caller using delete_user_unions()
    """
    return _ida_hexrays.restore_user_unions(*args)
cfunc_t_swigregister = _ida_hexrays.cfunc_t_swigregister
cfunc_t_swigregister(cfunc_t)
# Citem display flag.
CIT_COLLAPSED = _ida_hexrays.CIT_COLLAPSED
"""
display element in collapsed form
"""
# cfunc_t state flags (CFS_*).
CFS_BOUNDS = _ida_hexrays.CFS_BOUNDS
"""
'eamap' and 'boundaries' are ready
"""
CFS_TEXT = _ida_hexrays.CFS_TEXT
"""
'sv' is ready (and hdrlines)
"""
CFS_LVARS_HIDDEN = _ida_hexrays.CFS_LVARS_HIDDEN
"""
local variable definitions are collapsed
"""
# decompile() behavior flags (DECOMP_*).
DECOMP_NO_WAIT = _ida_hexrays.DECOMP_NO_WAIT
"""
do not display waitbox
"""
DECOMP_NO_CACHE = _ida_hexrays.DECOMP_NO_CACHE
"""
do not use decompilation cache
"""
DECOMP_NO_FRAME = _ida_hexrays.DECOMP_NO_FRAME
"""
do not use function frame info (only snippet mode)
"""
DECOMP_WARNINGS = _ida_hexrays.DECOMP_WARNINGS
"""
display warnings in the output window
"""
# --- Decompilation entry points and cache management forwarders. ---
def decompile(*args):
    """
    decompile(mbr, hf, flags=0) -> cfuncptr_t
    Decompile a snippet or a function.
    @param mbr: what to decompile (C++: const mba_ranges_t &)
    @param hf: extended error information (if failed) (C++:
        hexrays_failure_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
        int)
    @return: pointer to the decompilation result (a reference counted
        pointer). NULL if failed.
    """
    return _ida_hexrays.decompile(*args)
def decompile_func(*args):
    """
    decompile_func(pfn, hf, flags=0) -> cfuncptr_t
    Decompile a function. Multiple decompilations of the same function
    return the same object.
    @param pfn: pointer to function to decompile (C++: func_t *)
    @param hf: extended error information (if failed) (C++:
        hexrays_failure_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
        int)
    @return: pointer to the decompilation result (a reference counted
        pointer). NULL if failed.
    """
    return _ida_hexrays.decompile_func(*args)
def gen_microcode(*args):
    """
    gen_microcode(mbr, hf, retlist=None, flags=0, reqmat=MMAT_GLBOPT3) -> mbl_array_t
    Generate microcode of an arbitrary code snippet
    @param mbr: snippet ranges (C++: const mba_ranges_t &)
    @param hf: extended error information (if failed) (C++:
        hexrays_failure_t *)
    @param retlist: list of registers the snippet returns (C++: const
        mlist_t *)
    @param flags: bitwise combination of decompile() flags ... bits (C++:
        int)
    @param reqmat: required microcode maturity (C++: mba_maturity_t)
    @return: pointer to the microcode, NULL if failed.
    """
    return _ida_hexrays.gen_microcode(*args)
def mark_cfunc_dirty(*args):
    """
    mark_cfunc_dirty(ea, close_views=False) -> bool
    Flush the cached decompilation results. Erases a cache entry for the
    specified function.
    @param ea: function to erase from the cache (C++: ea_t)
    @param close_views: close pseudocode windows that show the function
        (C++: bool)
    @return: if a cache entry existed.
    """
    return _ida_hexrays.mark_cfunc_dirty(*args)
def clear_cached_cfuncs(*args):
    """
    clear_cached_cfuncs()
    Flush all cached decompilation results.
    """
    return _ida_hexrays.clear_cached_cfuncs(*args)
def has_cached_cfunc(*args):
    """
    has_cached_cfunc(ea) -> bool
    Do we have a cached decompilation result for 'ea'?
    @param ea (C++: ea_t)
    """
    return _ida_hexrays.has_cached_cfunc(*args)
def get_ctype_name(*args):
    """
    get_ctype_name(op) -> char const *
    """
    return _ida_hexrays.get_ctype_name(*args)
def create_field_name(*args):
    """
    create_field_name(type, offset=BADADDR) -> qstring
    """
    return _ida_hexrays.create_field_name(*args)
hxe_flowchart = _ida_hexrays.hxe_flowchart
hxe_stkpnts = _ida_hexrays.hxe_stkpnts
hxe_prolog = _ida_hexrays.hxe_prolog
hxe_microcode = _ida_hexrays.hxe_microcode
hxe_preoptimized = _ida_hexrays.hxe_preoptimized
hxe_locopt = _ida_hexrays.hxe_locopt
hxe_prealloc = _ida_hexrays.hxe_prealloc
hxe_glbopt = _ida_hexrays.hxe_glbopt
hxe_structural = _ida_hexrays.hxe_structural
hxe_maturity = _ida_hexrays.hxe_maturity
hxe_interr = _ida_hexrays.hxe_interr
hxe_combine = _ida_hexrays.hxe_combine
hxe_print_func = _ida_hexrays.hxe_print_func
hxe_func_printed = _ida_hexrays.hxe_func_printed
hxe_resolve_stkaddrs = _ida_hexrays.hxe_resolve_stkaddrs
hxe_open_pseudocode = _ida_hexrays.hxe_open_pseudocode
hxe_switch_pseudocode = _ida_hexrays.hxe_switch_pseudocode
hxe_refresh_pseudocode = _ida_hexrays.hxe_refresh_pseudocode
hxe_close_pseudocode = _ida_hexrays.hxe_close_pseudocode
hxe_keyboard = _ida_hexrays.hxe_keyboard
hxe_right_click = _ida_hexrays.hxe_right_click
hxe_double_click = _ida_hexrays.hxe_double_click
hxe_curpos = _ida_hexrays.hxe_curpos
hxe_create_hint = _ida_hexrays.hxe_create_hint
hxe_text_ready = _ida_hexrays.hxe_text_ready
hxe_populating_popup = _ida_hexrays.hxe_populating_popup
lxe_lvar_name_changed = _ida_hexrays.lxe_lvar_name_changed
lxe_lvar_type_changed = _ida_hexrays.lxe_lvar_type_changed
lxe_lvar_cmt_changed = _ida_hexrays.lxe_lvar_cmt_changed
lxe_lvar_mapping_changed = _ida_hexrays.lxe_lvar_mapping_changed
hxe_cmt_changed = _ida_hexrays.hxe_cmt_changed
USE_KEYBOARD = _ida_hexrays.USE_KEYBOARD
USE_MOUSE = _ida_hexrays.USE_MOUSE
ctext_position_t_swigregister = _ida_hexrays.ctext_position_t_swigregister
ctext_position_t_swigregister(ctext_position_t)
HEXRAYS_API_MAGIC = cvar.HEXRAYS_API_MAGIC
history_item_t_swigregister = _ida_hexrays.history_item_t_swigregister
history_item_t_swigregister(history_item_t)
vdui_t_swigregister = _ida_hexrays.vdui_t_swigregister
vdui_t_swigregister(vdui_t)
CMT_NONE = cvar.CMT_NONE
CMT_TAIL = cvar.CMT_TAIL
CMT_BLOCK1 = cvar.CMT_BLOCK1
CMT_BLOCK2 = cvar.CMT_BLOCK2
CMT_LVAR = cvar.CMT_LVAR
CMT_FUNC = cvar.CMT_FUNC
CMT_ALL = cvar.CMT_ALL
VDUI_VISIBLE = _ida_hexrays.VDUI_VISIBLE
"""
is visible?
"""
VDUI_VALID = _ida_hexrays.VDUI_VALID
"""
is valid?
"""
VDUI_LOCKED = _ida_hexrays.VDUI_LOCKED
"""
is locked?
"""
ui_stroff_op_t_swigregister = _ida_hexrays.ui_stroff_op_t_swigregister
ui_stroff_op_t_swigregister(ui_stroff_op_t)
ui_stroff_applicator_t_swigregister = _ida_hexrays.ui_stroff_applicator_t_swigregister
ui_stroff_applicator_t_swigregister(ui_stroff_applicator_t)
# SWIG forwarder: interactive UDT selection for the "structure offset" UI.
def select_udt_by_offset(*args):
    """
    select_udt_by_offset(udts, ops, applicator) -> int
    Select UDT
    @param udts: list of UDT tinfo_t for the selection, if NULL or empty
        then UDTs from the "Local types" will be used (C++: const
        qvector < tinfo_t > *)
    @param ops: operands (C++: const ui_stroff_ops_t &)
    @param applicator (C++: ui_stroff_applicator_t &)
    """
    return _ida_hexrays.select_udt_by_offset(*args)
# --- hx_* dispatch-table mirrors. ---
# Function-pointer style aliases exported by the native module; each hx_X
# mirrors the corresponding SDK function X. Grouped below per wrapped type.
# user_numforms_t map accessors.
hx_user_numforms_begin = _ida_hexrays.hx_user_numforms_begin
hx_user_numforms_end = _ida_hexrays.hx_user_numforms_end
hx_user_numforms_next = _ida_hexrays.hx_user_numforms_next
hx_user_numforms_prev = _ida_hexrays.hx_user_numforms_prev
hx_user_numforms_first = _ida_hexrays.hx_user_numforms_first
hx_user_numforms_second = _ida_hexrays.hx_user_numforms_second
hx_user_numforms_find = _ida_hexrays.hx_user_numforms_find
hx_user_numforms_insert = _ida_hexrays.hx_user_numforms_insert
hx_user_numforms_erase = _ida_hexrays.hx_user_numforms_erase
hx_user_numforms_clear = _ida_hexrays.hx_user_numforms_clear
hx_user_numforms_size = _ida_hexrays.hx_user_numforms_size
hx_user_numforms_free = _ida_hexrays.hx_user_numforms_free
hx_user_numforms_new = _ida_hexrays.hx_user_numforms_new
# lvar_mapping_t map accessors.
hx_lvar_mapping_begin = _ida_hexrays.hx_lvar_mapping_begin
hx_lvar_mapping_end = _ida_hexrays.hx_lvar_mapping_end
hx_lvar_mapping_next = _ida_hexrays.hx_lvar_mapping_next
hx_lvar_mapping_prev = _ida_hexrays.hx_lvar_mapping_prev
hx_lvar_mapping_first = _ida_hexrays.hx_lvar_mapping_first
hx_lvar_mapping_second = _ida_hexrays.hx_lvar_mapping_second
hx_lvar_mapping_find = _ida_hexrays.hx_lvar_mapping_find
hx_lvar_mapping_insert = _ida_hexrays.hx_lvar_mapping_insert
hx_lvar_mapping_erase = _ida_hexrays.hx_lvar_mapping_erase
hx_lvar_mapping_clear = _ida_hexrays.hx_lvar_mapping_clear
hx_lvar_mapping_size = _ida_hexrays.hx_lvar_mapping_size
hx_lvar_mapping_free = _ida_hexrays.hx_lvar_mapping_free
hx_lvar_mapping_new = _ida_hexrays.hx_lvar_mapping_new
# udcall_map_t map accessors.
hx_udcall_map_begin = _ida_hexrays.hx_udcall_map_begin
hx_udcall_map_end = _ida_hexrays.hx_udcall_map_end
hx_udcall_map_next = _ida_hexrays.hx_udcall_map_next
hx_udcall_map_prev = _ida_hexrays.hx_udcall_map_prev
hx_udcall_map_first = _ida_hexrays.hx_udcall_map_first
hx_udcall_map_second = _ida_hexrays.hx_udcall_map_second
hx_udcall_map_find = _ida_hexrays.hx_udcall_map_find
hx_udcall_map_insert = _ida_hexrays.hx_udcall_map_insert
hx_udcall_map_erase = _ida_hexrays.hx_udcall_map_erase
hx_udcall_map_clear = _ida_hexrays.hx_udcall_map_clear
hx_udcall_map_size = _ida_hexrays.hx_udcall_map_size
hx_udcall_map_free = _ida_hexrays.hx_udcall_map_free
hx_udcall_map_new = _ida_hexrays.hx_udcall_map_new
# user_cmts_t map accessors.
hx_user_cmts_begin = _ida_hexrays.hx_user_cmts_begin
hx_user_cmts_end = _ida_hexrays.hx_user_cmts_end
hx_user_cmts_next = _ida_hexrays.hx_user_cmts_next
hx_user_cmts_prev = _ida_hexrays.hx_user_cmts_prev
hx_user_cmts_first = _ida_hexrays.hx_user_cmts_first
hx_user_cmts_second = _ida_hexrays.hx_user_cmts_second
hx_user_cmts_find = _ida_hexrays.hx_user_cmts_find
hx_user_cmts_insert = _ida_hexrays.hx_user_cmts_insert
hx_user_cmts_erase = _ida_hexrays.hx_user_cmts_erase
hx_user_cmts_clear = _ida_hexrays.hx_user_cmts_clear
hx_user_cmts_size = _ida_hexrays.hx_user_cmts_size
hx_user_cmts_free = _ida_hexrays.hx_user_cmts_free
hx_user_cmts_new = _ida_hexrays.hx_user_cmts_new
# user_iflags_t map accessors.
hx_user_iflags_begin = _ida_hexrays.hx_user_iflags_begin
hx_user_iflags_end = _ida_hexrays.hx_user_iflags_end
hx_user_iflags_next = _ida_hexrays.hx_user_iflags_next
hx_user_iflags_prev = _ida_hexrays.hx_user_iflags_prev
hx_user_iflags_first = _ida_hexrays.hx_user_iflags_first
hx_user_iflags_second = _ida_hexrays.hx_user_iflags_second
hx_user_iflags_find = _ida_hexrays.hx_user_iflags_find
hx_user_iflags_insert = _ida_hexrays.hx_user_iflags_insert
hx_user_iflags_erase = _ida_hexrays.hx_user_iflags_erase
hx_user_iflags_clear = _ida_hexrays.hx_user_iflags_clear
hx_user_iflags_size = _ida_hexrays.hx_user_iflags_size
hx_user_iflags_free = _ida_hexrays.hx_user_iflags_free
hx_user_iflags_new = _ida_hexrays.hx_user_iflags_new
# user_unions_t map accessors.
hx_user_unions_begin = _ida_hexrays.hx_user_unions_begin
hx_user_unions_end = _ida_hexrays.hx_user_unions_end
hx_user_unions_next = _ida_hexrays.hx_user_unions_next
hx_user_unions_prev = _ida_hexrays.hx_user_unions_prev
hx_user_unions_first = _ida_hexrays.hx_user_unions_first
hx_user_unions_second = _ida_hexrays.hx_user_unions_second
hx_user_unions_find = _ida_hexrays.hx_user_unions_find
hx_user_unions_insert = _ida_hexrays.hx_user_unions_insert
hx_user_unions_erase = _ida_hexrays.hx_user_unions_erase
hx_user_unions_clear = _ida_hexrays.hx_user_unions_clear
hx_user_unions_size = _ida_hexrays.hx_user_unions_size
hx_user_unions_free = _ida_hexrays.hx_user_unions_free
hx_user_unions_new = _ida_hexrays.hx_user_unions_new
# user_labels_t map accessors.
hx_user_labels_begin = _ida_hexrays.hx_user_labels_begin
hx_user_labels_end = _ida_hexrays.hx_user_labels_end
hx_user_labels_next = _ida_hexrays.hx_user_labels_next
hx_user_labels_prev = _ida_hexrays.hx_user_labels_prev
hx_user_labels_first = _ida_hexrays.hx_user_labels_first
hx_user_labels_second = _ida_hexrays.hx_user_labels_second
hx_user_labels_find = _ida_hexrays.hx_user_labels_find
hx_user_labels_insert = _ida_hexrays.hx_user_labels_insert
hx_user_labels_erase = _ida_hexrays.hx_user_labels_erase
hx_user_labels_clear = _ida_hexrays.hx_user_labels_clear
hx_user_labels_size = _ida_hexrays.hx_user_labels_size
hx_user_labels_free = _ida_hexrays.hx_user_labels_free
hx_user_labels_new = _ida_hexrays.hx_user_labels_new
# eamap_t map accessors.
hx_eamap_begin = _ida_hexrays.hx_eamap_begin
hx_eamap_end = _ida_hexrays.hx_eamap_end
hx_eamap_next = _ida_hexrays.hx_eamap_next
hx_eamap_prev = _ida_hexrays.hx_eamap_prev
hx_eamap_first = _ida_hexrays.hx_eamap_first
hx_eamap_second = _ida_hexrays.hx_eamap_second
hx_eamap_find = _ida_hexrays.hx_eamap_find
hx_eamap_insert = _ida_hexrays.hx_eamap_insert
hx_eamap_erase = _ida_hexrays.hx_eamap_erase
hx_eamap_clear = _ida_hexrays.hx_eamap_clear
hx_eamap_size = _ida_hexrays.hx_eamap_size
hx_eamap_free = _ida_hexrays.hx_eamap_free
hx_eamap_new = _ida_hexrays.hx_eamap_new
# boundaries_t map accessors.
hx_boundaries_begin = _ida_hexrays.hx_boundaries_begin
hx_boundaries_end = _ida_hexrays.hx_boundaries_end
hx_boundaries_next = _ida_hexrays.hx_boundaries_next
hx_boundaries_prev = _ida_hexrays.hx_boundaries_prev
hx_boundaries_first = _ida_hexrays.hx_boundaries_first
hx_boundaries_second = _ida_hexrays.hx_boundaries_second
hx_boundaries_find = _ida_hexrays.hx_boundaries_find
hx_boundaries_insert = _ida_hexrays.hx_boundaries_insert
hx_boundaries_erase = _ida_hexrays.hx_boundaries_erase
hx_boundaries_clear = _ida_hexrays.hx_boundaries_clear
hx_boundaries_size = _ida_hexrays.hx_boundaries_size
hx_boundaries_free = _ida_hexrays.hx_boundaries_free
hx_boundaries_new = _ida_hexrays.hx_boundaries_new
# block_chains_t accessors (note: 'get' instead of first/second).
hx_block_chains_begin = _ida_hexrays.hx_block_chains_begin
hx_block_chains_end = _ida_hexrays.hx_block_chains_end
hx_block_chains_next = _ida_hexrays.hx_block_chains_next
hx_block_chains_prev = _ida_hexrays.hx_block_chains_prev
hx_block_chains_get = _ida_hexrays.hx_block_chains_get
hx_block_chains_find = _ida_hexrays.hx_block_chains_find
hx_block_chains_insert = _ida_hexrays.hx_block_chains_insert
hx_block_chains_erase = _ida_hexrays.hx_block_chains_erase
hx_block_chains_clear = _ida_hexrays.hx_block_chains_clear
hx_block_chains_size = _ida_hexrays.hx_block_chains_size
hx_block_chains_free = _ida_hexrays.hx_block_chains_free
hx_block_chains_new = _ida_hexrays.hx_block_chains_new
# valrng_t (value range) methods.
hx_valrng_t_clear = _ida_hexrays.hx_valrng_t_clear
hx_valrng_t_copy = _ida_hexrays.hx_valrng_t_copy
hx_valrng_t_assign = _ida_hexrays.hx_valrng_t_assign
hx_valrng_t_compare = _ida_hexrays.hx_valrng_t_compare
hx_valrng_t_set_eq = _ida_hexrays.hx_valrng_t_set_eq
hx_valrng_t_set_cmp = _ida_hexrays.hx_valrng_t_set_cmp
hx_valrng_t_reduce_size = _ida_hexrays.hx_valrng_t_reduce_size
hx_valrng_t_intersect_with = _ida_hexrays.hx_valrng_t_intersect_with
hx_valrng_t_unite_with = _ida_hexrays.hx_valrng_t_unite_with
hx_valrng_t_inverse = _ida_hexrays.hx_valrng_t_inverse
hx_valrng_t_has = _ida_hexrays.hx_valrng_t_has
hx_valrng_t_print = _ida_hexrays.hx_valrng_t_print
hx_valrng_t_dstr = _ida_hexrays.hx_valrng_t_dstr
hx_valrng_t_cvt_to_single_value = _ida_hexrays.hx_valrng_t_cvt_to_single_value
hx_valrng_t_cvt_to_cmp = _ida_hexrays.hx_valrng_t_cvt_to_cmp
# Microcode / register utilities.
hx_get_merror_desc = _ida_hexrays.hx_get_merror_desc
hx_reg2mreg = _ida_hexrays.hx_reg2mreg
hx_mreg2reg = _ida_hexrays.hx_mreg2reg
hx_install_optinsn_handler = _ida_hexrays.hx_install_optinsn_handler
hx_remove_optinsn_handler = _ida_hexrays.hx_remove_optinsn_handler
hx_install_optblock_handler = _ida_hexrays.hx_install_optblock_handler
hx_remove_optblock_handler = _ida_hexrays.hx_remove_optblock_handler
hx_must_mcode_close_block = _ida_hexrays.hx_must_mcode_close_block
hx_is_mcode_propagatable = _ida_hexrays.hx_is_mcode_propagatable
hx_negate_mcode_relation = _ida_hexrays.hx_negate_mcode_relation
hx_swap_mcode_relation = _ida_hexrays.hx_swap_mcode_relation
hx_get_signed_mcode = _ida_hexrays.hx_get_signed_mcode
hx_get_unsigned_mcode = _ida_hexrays.hx_get_unsigned_mcode
hx_mcode_modifies_d = _ida_hexrays.hx_mcode_modifies_d
hx_operand_locator_t_compare = _ida_hexrays.hx_operand_locator_t_compare
# Printer helpers.
hx_vd_printer_t_print = _ida_hexrays.hx_vd_printer_t_print
hx_file_printer_t_print = _ida_hexrays.hx_file_printer_t_print
hx_qstring_printer_t_print = _ida_hexrays.hx_qstring_printer_t_print
# Type system helpers.
hx_dstr = _ida_hexrays.hx_dstr
hx_is_type_correct = _ida_hexrays.hx_is_type_correct
hx_is_small_udt = _ida_hexrays.hx_is_small_udt
hx_is_nonbool_type = _ida_hexrays.hx_is_nonbool_type
hx_is_bool_type = _ida_hexrays.hx_is_bool_type
hx_partial_type_num = _ida_hexrays.hx_partial_type_num
hx_get_float_type = _ida_hexrays.hx_get_float_type
hx_get_int_type_by_width_and_sign = _ida_hexrays.hx_get_int_type_by_width_and_sign
hx_get_unk_type = _ida_hexrays.hx_get_unk_type
hx_dummy_ptrtype = _ida_hexrays.hx_dummy_ptrtype
hx_get_member_type = _ida_hexrays.hx_get_member_type
hx_make_pointer = _ida_hexrays.hx_make_pointer
hx_create_typedef = _ida_hexrays.hx_create_typedef
hx_get_type = _ida_hexrays.hx_get_type
hx_set_type = _ida_hexrays.hx_set_type
# vdloc_t (variable location) helpers.
hx_vdloc_t_dstr = _ida_hexrays.hx_vdloc_t_dstr
hx_vdloc_t_compare = _ida_hexrays.hx_vdloc_t_compare
hx_vdloc_t_is_aliasable = _ida_hexrays.hx_vdloc_t_is_aliasable
hx_print_vdloc = _ida_hexrays.hx_print_vdloc
hx_arglocs_overlap = _ida_hexrays.hx_arglocs_overlap
# Local variable (lvar_t / lvars_t) helpers.
hx_lvar_locator_t_compare = _ida_hexrays.hx_lvar_locator_t_compare
hx_lvar_locator_t_dstr = _ida_hexrays.hx_lvar_locator_t_dstr
hx_lvar_t_dstr = _ida_hexrays.hx_lvar_t_dstr
hx_lvar_t_is_promoted_arg = _ida_hexrays.hx_lvar_t_is_promoted_arg
hx_lvar_t_accepts_type = _ida_hexrays.hx_lvar_t_accepts_type
hx_lvar_t_set_lvar_type = _ida_hexrays.hx_lvar_t_set_lvar_type
hx_lvar_t_set_width = _ida_hexrays.hx_lvar_t_set_width
hx_lvar_t_append_list = _ida_hexrays.hx_lvar_t_append_list
hx_lvars_t_find_stkvar = _ida_hexrays.hx_lvars_t_find_stkvar
hx_lvars_t_find = _ida_hexrays.hx_lvars_t_find
hx_lvars_t_find_lvar = _ida_hexrays.hx_lvars_t_find_lvar
hx_restore_user_lvar_settings = _ida_hexrays.hx_restore_user_lvar_settings
hx_save_user_lvar_settings = _ida_hexrays.hx_save_user_lvar_settings
hx_modify_user_lvars = _ida_hexrays.hx_modify_user_lvars
# User-defined call helpers.
hx_restore_user_defined_calls = _ida_hexrays.hx_restore_user_defined_calls
hx_save_user_defined_calls = _ida_hexrays.hx_save_user_defined_calls
hx_parse_user_call = _ida_hexrays.hx_parse_user_call
hx_convert_to_user_call = _ida_hexrays.hx_convert_to_user_call
hx_install_microcode_filter = _ida_hexrays.hx_install_microcode_filter
hx_udc_filter_t_init = _ida_hexrays.hx_udc_filter_t_init
hx_udc_filter_t_apply = _ida_hexrays.hx_udc_filter_t_apply
# bitset_t methods.
hx_bitset_t_bitset_t = _ida_hexrays.hx_bitset_t_bitset_t
hx_bitset_t_copy = _ida_hexrays.hx_bitset_t_copy
hx_bitset_t_add = _ida_hexrays.hx_bitset_t_add
hx_bitset_t_add_ = _ida_hexrays.hx_bitset_t_add_
hx_bitset_t_add__ = _ida_hexrays.hx_bitset_t_add__
hx_bitset_t_sub = _ida_hexrays.hx_bitset_t_sub
hx_bitset_t_sub_ = _ida_hexrays.hx_bitset_t_sub_
hx_bitset_t_sub__ = _ida_hexrays.hx_bitset_t_sub__
hx_bitset_t_cut_at = _ida_hexrays.hx_bitset_t_cut_at
hx_bitset_t_shift_down = _ida_hexrays.hx_bitset_t_shift_down
hx_bitset_t_has = _ida_hexrays.hx_bitset_t_has
hx_bitset_t_has_all = _ida_hexrays.hx_bitset_t_has_all
hx_bitset_t_has_any = _ida_hexrays.hx_bitset_t_has_any
hx_bitset_t_dstr = _ida_hexrays.hx_bitset_t_dstr
hx_bitset_t_empty = _ida_hexrays.hx_bitset_t_empty
hx_bitset_t_count = _ida_hexrays.hx_bitset_t_count
hx_bitset_t_count_ = _ida_hexrays.hx_bitset_t_count_
hx_bitset_t_last = _ida_hexrays.hx_bitset_t_last
hx_bitset_t_fill_with_ones = _ida_hexrays.hx_bitset_t_fill_with_ones
hx_bitset_t_has_common = _ida_hexrays.hx_bitset_t_has_common
hx_bitset_t_intersect = _ida_hexrays.hx_bitset_t_intersect
hx_bitset_t_is_subset_of = _ida_hexrays.hx_bitset_t_is_subset_of
hx_bitset_t_compare = _ida_hexrays.hx_bitset_t_compare
hx_bitset_t_goup = _ida_hexrays.hx_bitset_t_goup
# ivl_t / ivlset_t (interval set) methods.
hx_ivl_t_dstr = _ida_hexrays.hx_ivl_t_dstr
hx_ivl_t_compare = _ida_hexrays.hx_ivl_t_compare
hx_ivlset_t_add = _ida_hexrays.hx_ivlset_t_add
hx_ivlset_t_add_ = _ida_hexrays.hx_ivlset_t_add_
hx_ivlset_t_addmasked = _ida_hexrays.hx_ivlset_t_addmasked
hx_ivlset_t_sub = _ida_hexrays.hx_ivlset_t_sub
hx_ivlset_t_sub_ = _ida_hexrays.hx_ivlset_t_sub_
hx_ivlset_t_has_common = _ida_hexrays.hx_ivlset_t_has_common
hx_ivlset_t_print = _ida_hexrays.hx_ivlset_t_print
hx_ivlset_t_dstr = _ida_hexrays.hx_ivlset_t_dstr
hx_ivlset_t_count = _ida_hexrays.hx_ivlset_t_count
hx_ivlset_t_has_common_ = _ida_hexrays.hx_ivlset_t_has_common_
hx_ivlset_t_contains = _ida_hexrays.hx_ivlset_t_contains
hx_ivlset_t_includes = _ida_hexrays.hx_ivlset_t_includes
hx_ivlset_t_intersect = _ida_hexrays.hx_ivlset_t_intersect
hx_ivlset_t_compare = _ida_hexrays.hx_ivlset_t_compare
# Register / memory list (rlist_t / mlist_t) helpers.
hx_get_mreg_name = _ida_hexrays.hx_get_mreg_name
hx_rlist_t_print = _ida_hexrays.hx_rlist_t_print
hx_rlist_t_dstr = _ida_hexrays.hx_rlist_t_dstr
hx_mlist_t_addmem = _ida_hexrays.hx_mlist_t_addmem
hx_mlist_t_print = _ida_hexrays.hx_mlist_t_print
hx_mlist_t_dstr = _ida_hexrays.hx_mlist_t_dstr
hx_mlist_t_compare = _ida_hexrays.hx_mlist_t_compare
# Operand reference (lvar_ref_t / stkvar_ref_t / fnumber_t) helpers.
hx_lvar_ref_t_compare = _ida_hexrays.hx_lvar_ref_t_compare
hx_lvar_ref_t_var = _ida_hexrays.hx_lvar_ref_t_var
hx_stkvar_ref_t_compare = _ida_hexrays.hx_stkvar_ref_t_compare
hx_stkvar_ref_t_get_stkvar = _ida_hexrays.hx_stkvar_ref_t_get_stkvar
hx_fnumber_t_print = _ida_hexrays.hx_fnumber_t_print
hx_fnumber_t_dstr = _ida_hexrays.hx_fnumber_t_dstr
# mop_t (microcode operand) methods.
hx_mop_t_copy = _ida_hexrays.hx_mop_t_copy
hx_mop_t_assign = _ida_hexrays.hx_mop_t_assign
hx_mop_t_swap = _ida_hexrays.hx_mop_t_swap
hx_mop_t_erase = _ida_hexrays.hx_mop_t_erase
hx_mop_t_print = _ida_hexrays.hx_mop_t_print
hx_mop_t_dstr = _ida_hexrays.hx_mop_t_dstr
hx_mop_t_create_from_mlist = _ida_hexrays.hx_mop_t_create_from_mlist
hx_mop_t_create_from_ivlset = _ida_hexrays.hx_mop_t_create_from_ivlset
hx_mop_t_create_from_vdloc = _ida_hexrays.hx_mop_t_create_from_vdloc
hx_mop_t_create_from_scattered_vdloc = _ida_hexrays.hx_mop_t_create_from_scattered_vdloc
hx_mop_t_create_from_insn = _ida_hexrays.hx_mop_t_create_from_insn
hx_mop_t_make_number = _ida_hexrays.hx_mop_t_make_number
hx_mop_t_make_fpnum = _ida_hexrays.hx_mop_t_make_fpnum
hx_mop_t_make_reg_pair = _ida_hexrays.hx_mop_t_make_reg_pair
hx_mop_t_make_helper = _ida_hexrays.hx_mop_t_make_helper
hx_mop_t_is_bit_reg = _ida_hexrays.hx_mop_t_is_bit_reg
hx_mop_t_may_use_aliased_memory = _ida_hexrays.hx_mop_t_may_use_aliased_memory
hx_mop_t_is01 = _ida_hexrays.hx_mop_t_is01
hx_mop_t_is_sign_extended_from = _ida_hexrays.hx_mop_t_is_sign_extended_from
hx_mop_t_is_zero_extended_from = _ida_hexrays.hx_mop_t_is_zero_extended_from
hx_mop_t_equal_mops = _ida_hexrays.hx_mop_t_equal_mops
hx_mop_t_lexcompare = _ida_hexrays.hx_mop_t_lexcompare
hx_mop_t_for_all_ops = _ida_hexrays.hx_mop_t_for_all_ops
hx_mop_t_for_all_scattered_submops = _ida_hexrays.hx_mop_t_for_all_scattered_submops
hx_mop_t_is_constant = _ida_hexrays.hx_mop_t_is_constant
hx_mop_t_get_stkoff = _ida_hexrays.hx_mop_t_get_stkoff
hx_mop_t_make_low_half = _ida_hexrays.hx_mop_t_make_low_half
hx_mop_t_make_high_half = _ida_hexrays.hx_mop_t_make_high_half
hx_mop_t_make_first_half = _ida_hexrays.hx_mop_t_make_first_half
hx_mop_t_make_second_half = _ida_hexrays.hx_mop_t_make_second_half
hx_mop_t_shift_mop = _ida_hexrays.hx_mop_t_shift_mop
hx_mop_t_change_size = _ida_hexrays.hx_mop_t_change_size
hx_mop_t_preserve_side_effects = _ida_hexrays.hx_mop_t_preserve_side_effects
hx_mop_t_apply_ld_mcode = _ida_hexrays.hx_mop_t_apply_ld_mcode
hx_mcallarg_t_print = _ida_hexrays.hx_mcallarg_t_print
hx_mcallarg_t_dstr = _ida_hexrays.hx_mcallarg_t_dstr
hx_mcallarg_t_set_regarg = _ida_hexrays.hx_mcallarg_t_set_regarg
hx_mcallinfo_t_lexcompare = _ida_hexrays.hx_mcallinfo_t_lexcompare
hx_mcallinfo_t_set_type = _ida_hexrays.hx_mcallinfo_t_set_type
hx_mcallinfo_t_get_type = _ida_hexrays.hx_mcallinfo_t_get_type
hx_mcallinfo_t_print = _ida_hexrays.hx_mcallinfo_t_print
hx_mcallinfo_t_dstr = _ida_hexrays.hx_mcallinfo_t_dstr
hx_mcases_t_compare = _ida_hexrays.hx_mcases_t_compare
hx_mcases_t_print = _ida_hexrays.hx_mcases_t_print
hx_mcases_t_dstr = _ida_hexrays.hx_mcases_t_dstr
hx_vivl_t_extend_to_cover = _ida_hexrays.hx_vivl_t_extend_to_cover
hx_vivl_t_intersect = _ida_hexrays.hx_vivl_t_intersect
hx_vivl_t_print = _ida_hexrays.hx_vivl_t_print
hx_vivl_t_dstr = _ida_hexrays.hx_vivl_t_dstr
hx_chain_t_print = _ida_hexrays.hx_chain_t_print
hx_chain_t_dstr = _ida_hexrays.hx_chain_t_dstr
hx_chain_t_append_list = _ida_hexrays.hx_chain_t_append_list
hx_block_chains_t_get_chain = _ida_hexrays.hx_block_chains_t_get_chain
hx_block_chains_t_print = _ida_hexrays.hx_block_chains_t_print
hx_block_chains_t_dstr = _ida_hexrays.hx_block_chains_t_dstr
hx_graph_chains_t_for_all_chains = _ida_hexrays.hx_graph_chains_t_for_all_chains
hx_graph_chains_t_release = _ida_hexrays.hx_graph_chains_t_release
hx_minsn_t_init = _ida_hexrays.hx_minsn_t_init
hx_minsn_t_copy = _ida_hexrays.hx_minsn_t_copy
hx_minsn_t_swap = _ida_hexrays.hx_minsn_t_swap
hx_minsn_t_print = _ida_hexrays.hx_minsn_t_print
hx_minsn_t_dstr = _ida_hexrays.hx_minsn_t_dstr
hx_minsn_t_setaddr = _ida_hexrays.hx_minsn_t_setaddr
hx_minsn_t_optimize_subtree = _ida_hexrays.hx_minsn_t_optimize_subtree
hx_minsn_t_for_all_ops = _ida_hexrays.hx_minsn_t_for_all_ops
hx_minsn_t_for_all_insns = _ida_hexrays.hx_minsn_t_for_all_insns
hx_minsn_t__make_nop = _ida_hexrays.hx_minsn_t__make_nop
hx_minsn_t_equal_insns = _ida_hexrays.hx_minsn_t_equal_insns
hx_minsn_t_lexcompare = _ida_hexrays.hx_minsn_t_lexcompare
hx_minsn_t_is_noret_call = _ida_hexrays.hx_minsn_t_is_noret_call
hx_minsn_t_is_helper = _ida_hexrays.hx_minsn_t_is_helper
hx_minsn_t_find_call = _ida_hexrays.hx_minsn_t_find_call
hx_minsn_t_has_side_effects = _ida_hexrays.hx_minsn_t_has_side_effects
hx_minsn_t_find_opcode = _ida_hexrays.hx_minsn_t_find_opcode
hx_minsn_t_find_ins_op = _ida_hexrays.hx_minsn_t_find_ins_op
hx_minsn_t_find_num_op = _ida_hexrays.hx_minsn_t_find_num_op
hx_minsn_t_modifes_d = _ida_hexrays.hx_minsn_t_modifes_d
hx_minsn_t_is_between = _ida_hexrays.hx_minsn_t_is_between
hx_minsn_t_may_use_aliased_memory = _ida_hexrays.hx_minsn_t_may_use_aliased_memory
hx_getf_reginsn = _ida_hexrays.hx_getf_reginsn
hx_getb_reginsn = _ida_hexrays.hx_getb_reginsn
hx_mblock_t_init = _ida_hexrays.hx_mblock_t_init
hx_mblock_t_print = _ida_hexrays.hx_mblock_t_print
hx_mblock_t_dump = _ida_hexrays.hx_mblock_t_dump
hx_mblock_t_vdump_block = _ida_hexrays.hx_mblock_t_vdump_block
hx_mblock_t_insert_into_block = _ida_hexrays.hx_mblock_t_insert_into_block
hx_mblock_t_remove_from_block = _ida_hexrays.hx_mblock_t_remove_from_block
hx_mblock_t_for_all_insns = _ida_hexrays.hx_mblock_t_for_all_insns
hx_mblock_t_for_all_ops = _ida_hexrays.hx_mblock_t_for_all_ops
hx_mblock_t_for_all_uses = _ida_hexrays.hx_mblock_t_for_all_uses
hx_mblock_t_optimize_insn = _ida_hexrays.hx_mblock_t_optimize_insn
hx_mblock_t_optimize_block = _ida_hexrays.hx_mblock_t_optimize_block
hx_mblock_t_build_lists = _ida_hexrays.hx_mblock_t_build_lists
hx_mblock_t_append_use_list = _ida_hexrays.hx_mblock_t_append_use_list
hx_mblock_t_append_def_list = _ida_hexrays.hx_mblock_t_append_def_list
hx_mblock_t_build_use_list = _ida_hexrays.hx_mblock_t_build_use_list
hx_mblock_t_build_def_list = _ida_hexrays.hx_mblock_t_build_def_list
hx_mblock_t_find_first_use = _ida_hexrays.hx_mblock_t_find_first_use
hx_mblock_t_find_redefinition = _ida_hexrays.hx_mblock_t_find_redefinition
hx_mblock_t_is_rhs_redefined = _ida_hexrays.hx_mblock_t_is_rhs_redefined
hx_mblock_t_find_access = _ida_hexrays.hx_mblock_t_find_access
hx_mblock_t_get_valranges = _ida_hexrays.hx_mblock_t_get_valranges
hx_mbl_array_t_idaloc2vd = _ida_hexrays.hx_mbl_array_t_idaloc2vd
hx_mbl_array_t_vd2idaloc = _ida_hexrays.hx_mbl_array_t_vd2idaloc
hx_mbl_array_t_term = _ida_hexrays.hx_mbl_array_t_term
hx_mbl_array_t_optimize_local = _ida_hexrays.hx_mbl_array_t_optimize_local
hx_mbl_array_t_build_graph = _ida_hexrays.hx_mbl_array_t_build_graph
hx_mbl_array_t_get_graph = _ida_hexrays.hx_mbl_array_t_get_graph
hx_mbl_array_t_analyze_calls = _ida_hexrays.hx_mbl_array_t_analyze_calls
hx_mbl_array_t_optimize_global = _ida_hexrays.hx_mbl_array_t_optimize_global
hx_mbl_array_t_alloc_lvars = _ida_hexrays.hx_mbl_array_t_alloc_lvars
hx_mbl_array_t_dump = _ida_hexrays.hx_mbl_array_t_dump
hx_mbl_array_t_vdump_mba = _ida_hexrays.hx_mbl_array_t_vdump_mba
hx_mbl_array_t_print = _ida_hexrays.hx_mbl_array_t_print
hx_mbl_array_t_verify = _ida_hexrays.hx_mbl_array_t_verify
hx_mbl_array_t_mark_chains_dirty = _ida_hexrays.hx_mbl_array_t_mark_chains_dirty
hx_mbl_array_t_insert_block = _ida_hexrays.hx_mbl_array_t_insert_block
hx_mbl_array_t_remove_block = _ida_hexrays.hx_mbl_array_t_remove_block
hx_mbl_array_t_remove_empty_blocks = _ida_hexrays.hx_mbl_array_t_remove_empty_blocks
hx_mbl_array_t_combine_blocks = _ida_hexrays.hx_mbl_array_t_combine_blocks
hx_mbl_array_t_for_all_ops = _ida_hexrays.hx_mbl_array_t_for_all_ops
hx_mbl_array_t_for_all_insns = _ida_hexrays.hx_mbl_array_t_for_all_insns
hx_mbl_array_t_for_all_topinsns = _ida_hexrays.hx_mbl_array_t_for_all_topinsns
hx_mbl_array_t_find_mop = _ida_hexrays.hx_mbl_array_t_find_mop
hx_mbl_array_t_arg = _ida_hexrays.hx_mbl_array_t_arg
hx_mbl_array_t_serialize = _ida_hexrays.hx_mbl_array_t_serialize
hx_mbl_array_t_deserialize = _ida_hexrays.hx_mbl_array_t_deserialize
hx_mbl_graph_t_is_accessed_globally = _ida_hexrays.hx_mbl_graph_t_is_accessed_globally
hx_mbl_graph_t_get_ud = _ida_hexrays.hx_mbl_graph_t_get_ud
hx_mbl_graph_t_get_du = _ida_hexrays.hx_mbl_graph_t_get_du
hx_codegen_t_emit = _ida_hexrays.hx_codegen_t_emit
hx_codegen_t_emit_ = _ida_hexrays.hx_codegen_t_emit_
hx_is_kreg = _ida_hexrays.hx_is_kreg
hx_get_temp_regs = _ida_hexrays.hx_get_temp_regs
hx_get_hexrays_version = _ida_hexrays.hx_get_hexrays_version
hx_open_pseudocode = _ida_hexrays.hx_open_pseudocode
hx_close_pseudocode = _ida_hexrays.hx_close_pseudocode
hx_get_widget_vdui = _ida_hexrays.hx_get_widget_vdui
hx_decompile_many = _ida_hexrays.hx_decompile_many
hx_hexrays_failure_t_desc = _ida_hexrays.hx_hexrays_failure_t_desc
hx_send_database = _ida_hexrays.hx_send_database
hx_gco_info_t_append_to_list = _ida_hexrays.hx_gco_info_t_append_to_list
hx_get_current_operand = _ida_hexrays.hx_get_current_operand
hx_remitem = _ida_hexrays.hx_remitem
hx_negated_relation = _ida_hexrays.hx_negated_relation
hx_swapped_relation = _ida_hexrays.hx_swapped_relation
hx_get_op_signness = _ida_hexrays.hx_get_op_signness
hx_asgop = _ida_hexrays.hx_asgop
hx_asgop_revert = _ida_hexrays.hx_asgop_revert
hx_cnumber_t_print = _ida_hexrays.hx_cnumber_t_print
hx_cnumber_t_value = _ida_hexrays.hx_cnumber_t_value
hx_cnumber_t_assign = _ida_hexrays.hx_cnumber_t_assign
hx_cnumber_t_compare = _ida_hexrays.hx_cnumber_t_compare
hx_var_ref_t_compare = _ida_hexrays.hx_var_ref_t_compare
hx_ctree_visitor_t_apply_to = _ida_hexrays.hx_ctree_visitor_t_apply_to
hx_ctree_visitor_t_apply_to_exprs = _ida_hexrays.hx_ctree_visitor_t_apply_to_exprs
hx_ctree_parentee_t_recalc_parent_types = _ida_hexrays.hx_ctree_parentee_t_recalc_parent_types
hx_cfunc_parentee_t_calc_rvalue_type = _ida_hexrays.hx_cfunc_parentee_t_calc_rvalue_type
hx_citem_locator_t_compare = _ida_hexrays.hx_citem_locator_t_compare
hx_citem_t_contains_expr = _ida_hexrays.hx_citem_t_contains_expr
hx_citem_t_contains_label = _ida_hexrays.hx_citem_t_contains_label
hx_citem_t_find_parent_of = _ida_hexrays.hx_citem_t_find_parent_of
hx_citem_t_find_closest_addr = _ida_hexrays.hx_citem_t_find_closest_addr
hx_cexpr_t_assign = _ida_hexrays.hx_cexpr_t_assign
hx_cexpr_t_compare = _ida_hexrays.hx_cexpr_t_compare
hx_cexpr_t_replace_by = _ida_hexrays.hx_cexpr_t_replace_by
hx_cexpr_t_cleanup = _ida_hexrays.hx_cexpr_t_cleanup
hx_cexpr_t_put_number = _ida_hexrays.hx_cexpr_t_put_number
hx_cexpr_t_print1 = _ida_hexrays.hx_cexpr_t_print1
hx_cexpr_t_calc_type = _ida_hexrays.hx_cexpr_t_calc_type
hx_cexpr_t_equal_effect = _ida_hexrays.hx_cexpr_t_equal_effect
hx_cexpr_t_is_child_of = _ida_hexrays.hx_cexpr_t_is_child_of
hx_cexpr_t_contains_operator = _ida_hexrays.hx_cexpr_t_contains_operator
hx_cexpr_t_get_high_nbit_bound = _ida_hexrays.hx_cexpr_t_get_high_nbit_bound
hx_cexpr_t_get_low_nbit_bound = _ida_hexrays.hx_cexpr_t_get_low_nbit_bound
hx_cexpr_t_requires_lvalue = _ida_hexrays.hx_cexpr_t_requires_lvalue
hx_cexpr_t_has_side_effects = _ida_hexrays.hx_cexpr_t_has_side_effects
hx_cif_t_assign = _ida_hexrays.hx_cif_t_assign
hx_cif_t_compare = _ida_hexrays.hx_cif_t_compare
hx_cloop_t_assign = _ida_hexrays.hx_cloop_t_assign
hx_cfor_t_compare = _ida_hexrays.hx_cfor_t_compare
hx_cwhile_t_compare = _ida_hexrays.hx_cwhile_t_compare
hx_cdo_t_compare = _ida_hexrays.hx_cdo_t_compare
hx_creturn_t_compare = _ida_hexrays.hx_creturn_t_compare
hx_cgoto_t_compare = _ida_hexrays.hx_cgoto_t_compare
hx_casm_t_compare = _ida_hexrays.hx_casm_t_compare
hx_cinsn_t_assign = _ida_hexrays.hx_cinsn_t_assign
hx_cinsn_t_compare = _ida_hexrays.hx_cinsn_t_compare
hx_cinsn_t_replace_by = _ida_hexrays.hx_cinsn_t_replace_by
hx_cinsn_t_cleanup = _ida_hexrays.hx_cinsn_t_cleanup
hx_cinsn_t_new_insn = _ida_hexrays.hx_cinsn_t_new_insn
hx_cinsn_t_create_if = _ida_hexrays.hx_cinsn_t_create_if
hx_cinsn_t_print = _ida_hexrays.hx_cinsn_t_print
hx_cinsn_t_print1 = _ida_hexrays.hx_cinsn_t_print1
hx_cinsn_t_is_ordinary_flow = _ida_hexrays.hx_cinsn_t_is_ordinary_flow
hx_cinsn_t_contains_insn = _ida_hexrays.hx_cinsn_t_contains_insn
hx_cinsn_t_collect_free_breaks = _ida_hexrays.hx_cinsn_t_collect_free_breaks
hx_cinsn_t_collect_free_continues = _ida_hexrays.hx_cinsn_t_collect_free_continues
hx_cblock_t_compare = _ida_hexrays.hx_cblock_t_compare
hx_carglist_t_compare = _ida_hexrays.hx_carglist_t_compare
hx_ccase_t_compare = _ida_hexrays.hx_ccase_t_compare
hx_ccases_t_compare = _ida_hexrays.hx_ccases_t_compare
hx_cswitch_t_compare = _ida_hexrays.hx_cswitch_t_compare
hx_ctree_item_t_get_memptr = _ida_hexrays.hx_ctree_item_t_get_memptr
hx_ctree_item_t_get_lvar = _ida_hexrays.hx_ctree_item_t_get_lvar
hx_ctree_item_t_get_ea = _ida_hexrays.hx_ctree_item_t_get_ea
hx_ctree_item_t_get_label_num = _ida_hexrays.hx_ctree_item_t_get_label_num
hx_lnot = _ida_hexrays.hx_lnot
hx_new_block = _ida_hexrays.hx_new_block
hx_vcreate_helper = _ida_hexrays.hx_vcreate_helper
hx_vcall_helper = _ida_hexrays.hx_vcall_helper
hx_make_num = _ida_hexrays.hx_make_num
hx_make_ref = _ida_hexrays.hx_make_ref
hx_dereference = _ida_hexrays.hx_dereference
hx_save_user_labels = _ida_hexrays.hx_save_user_labels
hx_save_user_cmts = _ida_hexrays.hx_save_user_cmts
hx_save_user_numforms = _ida_hexrays.hx_save_user_numforms
hx_save_user_iflags = _ida_hexrays.hx_save_user_iflags
hx_save_user_unions = _ida_hexrays.hx_save_user_unions
hx_restore_user_labels = _ida_hexrays.hx_restore_user_labels
hx_restore_user_cmts = _ida_hexrays.hx_restore_user_cmts
hx_restore_user_numforms = _ida_hexrays.hx_restore_user_numforms
hx_restore_user_iflags = _ida_hexrays.hx_restore_user_iflags
hx_restore_user_unions = _ida_hexrays.hx_restore_user_unions
hx_cfunc_t_build_c_tree = _ida_hexrays.hx_cfunc_t_build_c_tree
hx_cfunc_t_verify = _ida_hexrays.hx_cfunc_t_verify
hx_cfunc_t_print_dcl = _ida_hexrays.hx_cfunc_t_print_dcl
hx_cfunc_t_print_func = _ida_hexrays.hx_cfunc_t_print_func
hx_cfunc_t_get_func_type = _ida_hexrays.hx_cfunc_t_get_func_type
hx_cfunc_t_get_lvars = _ida_hexrays.hx_cfunc_t_get_lvars
hx_cfunc_t_get_stkoff_delta = _ida_hexrays.hx_cfunc_t_get_stkoff_delta
hx_cfunc_t_find_label = _ida_hexrays.hx_cfunc_t_find_label
hx_cfunc_t_remove_unused_labels = _ida_hexrays.hx_cfunc_t_remove_unused_labels
hx_cfunc_t_get_user_cmt = _ida_hexrays.hx_cfunc_t_get_user_cmt
hx_cfunc_t_set_user_cmt = _ida_hexrays.hx_cfunc_t_set_user_cmt
hx_cfunc_t_get_user_iflags = _ida_hexrays.hx_cfunc_t_get_user_iflags
hx_cfunc_t_set_user_iflags = _ida_hexrays.hx_cfunc_t_set_user_iflags
hx_cfunc_t_has_orphan_cmts = _ida_hexrays.hx_cfunc_t_has_orphan_cmts
hx_cfunc_t_del_orphan_cmts = _ida_hexrays.hx_cfunc_t_del_orphan_cmts
hx_cfunc_t_get_user_union_selection = _ida_hexrays.hx_cfunc_t_get_user_union_selection
hx_cfunc_t_set_user_union_selection = _ida_hexrays.hx_cfunc_t_set_user_union_selection
hx_cfunc_t_get_line_item = _ida_hexrays.hx_cfunc_t_get_line_item
hx_cfunc_t_get_warnings = _ida_hexrays.hx_cfunc_t_get_warnings
hx_cfunc_t_get_eamap = _ida_hexrays.hx_cfunc_t_get_eamap
hx_cfunc_t_get_boundaries = _ida_hexrays.hx_cfunc_t_get_boundaries
hx_cfunc_t_get_pseudocode = _ida_hexrays.hx_cfunc_t_get_pseudocode
hx_cfunc_t_gather_derefs = _ida_hexrays.hx_cfunc_t_gather_derefs
hx_cfunc_t_find_item_coords = _ida_hexrays.hx_cfunc_t_find_item_coords
hx_cfunc_t_cleanup = _ida_hexrays.hx_cfunc_t_cleanup
hx_decompile = _ida_hexrays.hx_decompile
hx_gen_microcode = _ida_hexrays.hx_gen_microcode
hx_mark_cfunc_dirty = _ida_hexrays.hx_mark_cfunc_dirty
hx_clear_cached_cfuncs = _ida_hexrays.hx_clear_cached_cfuncs
hx_has_cached_cfunc = _ida_hexrays.hx_has_cached_cfunc
hx_get_ctype_name = _ida_hexrays.hx_get_ctype_name
hx_create_field_name = _ida_hexrays.hx_create_field_name
hx_install_hexrays_callback = _ida_hexrays.hx_install_hexrays_callback
hx_remove_hexrays_callback = _ida_hexrays.hx_remove_hexrays_callback
hx_vdui_t_set_locked = _ida_hexrays.hx_vdui_t_set_locked
hx_vdui_t_refresh_view = _ida_hexrays.hx_vdui_t_refresh_view
hx_vdui_t_refresh_ctext = _ida_hexrays.hx_vdui_t_refresh_ctext
hx_vdui_t_switch_to = _ida_hexrays.hx_vdui_t_switch_to
hx_vdui_t_get_number = _ida_hexrays.hx_vdui_t_get_number
hx_vdui_t_get_current_label = _ida_hexrays.hx_vdui_t_get_current_label
hx_vdui_t_clear = _ida_hexrays.hx_vdui_t_clear
hx_vdui_t_refresh_cpos = _ida_hexrays.hx_vdui_t_refresh_cpos
hx_vdui_t_get_current_item = _ida_hexrays.hx_vdui_t_get_current_item
hx_vdui_t_ui_rename_lvar = _ida_hexrays.hx_vdui_t_ui_rename_lvar
hx_vdui_t_rename_lvar = _ida_hexrays.hx_vdui_t_rename_lvar
hx_vdui_t_ui_set_call_type = _ida_hexrays.hx_vdui_t_ui_set_call_type
hx_vdui_t_ui_set_lvar_type = _ida_hexrays.hx_vdui_t_ui_set_lvar_type
hx_vdui_t_set_lvar_type = _ida_hexrays.hx_vdui_t_set_lvar_type
hx_vdui_t_ui_edit_lvar_cmt = _ida_hexrays.hx_vdui_t_ui_edit_lvar_cmt
hx_vdui_t_set_lvar_cmt = _ida_hexrays.hx_vdui_t_set_lvar_cmt
hx_vdui_t_ui_map_lvar = _ida_hexrays.hx_vdui_t_ui_map_lvar
hx_vdui_t_ui_unmap_lvar = _ida_hexrays.hx_vdui_t_ui_unmap_lvar
hx_vdui_t_map_lvar = _ida_hexrays.hx_vdui_t_map_lvar
hx_vdui_t_set_strmem_type = _ida_hexrays.hx_vdui_t_set_strmem_type
hx_vdui_t_rename_strmem = _ida_hexrays.hx_vdui_t_rename_strmem
hx_vdui_t_set_global_type = _ida_hexrays.hx_vdui_t_set_global_type
hx_vdui_t_rename_global = _ida_hexrays.hx_vdui_t_rename_global
hx_vdui_t_rename_label = _ida_hexrays.hx_vdui_t_rename_label
hx_vdui_t_jump_enter = _ida_hexrays.hx_vdui_t_jump_enter
hx_vdui_t_ctree_to_disasm = _ida_hexrays.hx_vdui_t_ctree_to_disasm
hx_vdui_t_calc_cmt_type = _ida_hexrays.hx_vdui_t_calc_cmt_type
hx_vdui_t_edit_cmt = _ida_hexrays.hx_vdui_t_edit_cmt
hx_vdui_t_edit_func_cmt = _ida_hexrays.hx_vdui_t_edit_func_cmt
hx_vdui_t_del_orphan_cmts = _ida_hexrays.hx_vdui_t_del_orphan_cmts
hx_vdui_t_set_num_radix = _ida_hexrays.hx_vdui_t_set_num_radix
hx_vdui_t_set_num_enum = _ida_hexrays.hx_vdui_t_set_num_enum
hx_vdui_t_set_num_stroff = _ida_hexrays.hx_vdui_t_set_num_stroff
hx_vdui_t_invert_sign = _ida_hexrays.hx_vdui_t_invert_sign
hx_vdui_t_invert_bits = _ida_hexrays.hx_vdui_t_invert_bits
hx_vdui_t_collapse_item = _ida_hexrays.hx_vdui_t_collapse_item
hx_vdui_t_collapse_lvars = _ida_hexrays.hx_vdui_t_collapse_lvars
hx_vdui_t_split_item = _ida_hexrays.hx_vdui_t_split_item
hx_hexrays_alloc = _ida_hexrays.hx_hexrays_alloc
hx_hexrays_free = _ida_hexrays.hx_hexrays_free
hx_vdui_t_set_noptr_lvar = _ida_hexrays.hx_vdui_t_set_noptr_lvar
hx_select_udt_by_offset = _ida_hexrays.hx_select_udt_by_offset
hx_mblock_t_get_valranges_ = _ida_hexrays.hx_mblock_t_get_valranges_
hx_cfunc_t_refresh_func_ctext = _ida_hexrays.hx_cfunc_t_refresh_func_ctext
hx_checkout_hexrays_license = _ida_hexrays.hx_checkout_hexrays_license
hx_mbl_array_t_copy_block = _ida_hexrays.hx_mbl_array_t_copy_block
hx_mblock_t_optimize_useless_jump = _ida_hexrays.hx_mblock_t_optimize_useless_jump
hx_mblock_t_get_reginsn_qty = _ida_hexrays.hx_mblock_t_get_reginsn_qty
# SWIG boilerplate: fetch the C extension's registration hook and register the
# Python proxy class for user_numforms_iterator_t, so wrapped C++ iterator
# objects returned by _ida_hexrays are presented as this proxy type.
user_numforms_iterator_t_swigregister = _ida_hexrays.user_numforms_iterator_t_swigregister
user_numforms_iterator_t_swigregister(user_numforms_iterator_t)
# ---------------------------------------------------------------------------
# SWIG-generated accessors for user_numforms_t (a C++ map from
# operand_locator_t keys to number_format_t values).  Every function below is
# a thin wrapper that forwards its arguments unchanged to the _ida_hexrays
# C extension; no logic lives on the Python side.
# ---------------------------------------------------------------------------

def user_numforms_begin(*args):
    """
    user_numforms_begin(map) -> user_numforms_iterator_t

    Get iterator pointing to the beginning of user_numforms_t.

    @param map (C++: const user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_begin(*args)


def user_numforms_end(*args):
    """
    user_numforms_end(map) -> user_numforms_iterator_t

    Get iterator pointing to the end of user_numforms_t.

    @param map (C++: const user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_end(*args)


def user_numforms_next(*args):
    """
    user_numforms_next(p) -> user_numforms_iterator_t

    Move to the next element.

    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_next(*args)


def user_numforms_prev(*args):
    """
    user_numforms_prev(p) -> user_numforms_iterator_t

    Move to the previous element.

    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_prev(*args)


def user_numforms_first(*args):
    """
    user_numforms_first(p) -> operand_locator_t

    Get reference to the current map key.

    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_first(*args)


def user_numforms_second(*args):
    """
    user_numforms_second(p) -> number_format_t

    Get reference to the current map value.

    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_second(*args)


def user_numforms_find(*args):
    """
    user_numforms_find(map, key) -> user_numforms_iterator_t

    Find the specified key in user_numforms_t.

    @param map (C++: const user_numforms_t *)
    @param key (C++: const operand_locator_t &)
    """
    return _ida_hexrays.user_numforms_find(*args)


def user_numforms_insert(*args):
    """
    user_numforms_insert(map, key, val) -> user_numforms_iterator_t

    Insert new ( 'operand_locator_t' , 'number_format_t' ) pair into
    user_numforms_t.

    @param map (C++: user_numforms_t *)
    @param key (C++: const operand_locator_t &)
    @param val (C++: const number_format_t &)
    """
    return _ida_hexrays.user_numforms_insert(*args)


def user_numforms_erase(*args):
    """
    user_numforms_erase(map, p)

    Erase current element from user_numforms_t.

    @param map (C++: user_numforms_t *)
    @param p (C++: user_numforms_iterator_t)
    """
    return _ida_hexrays.user_numforms_erase(*args)


def user_numforms_clear(*args):
    """
    user_numforms_clear(map)

    Clear user_numforms_t.

    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_clear(*args)


def user_numforms_size(*args):
    """
    user_numforms_size(map) -> size_t

    Get size of user_numforms_t.

    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_size(*args)


def user_numforms_free(*args):
    """
    user_numforms_free(map)

    Delete user_numforms_t instance.

    @param map (C++: user_numforms_t *)
    """
    return _ida_hexrays.user_numforms_free(*args)


def user_numforms_new(*args):
    """
    user_numforms_new() -> user_numforms_t

    Create a new user_numforms_t instance.
    """
    return _ida_hexrays.user_numforms_new(*args)
# SWIG boilerplate: register the Python proxy class for
# lvar_mapping_iterator_t with the _ida_hexrays C extension.
lvar_mapping_iterator_t_swigregister = _ida_hexrays.lvar_mapping_iterator_t_swigregister
lvar_mapping_iterator_t_swigregister(lvar_mapping_iterator_t)
# ---------------------------------------------------------------------------
# SWIG-generated accessors for lvar_mapping_t (a C++ map from lvar_locator_t
# keys to lvar_locator_t values).  Thin wrappers that forward unchanged to
# the _ida_hexrays C extension.
# ---------------------------------------------------------------------------

def lvar_mapping_begin(*args):
    """
    lvar_mapping_begin(map) -> lvar_mapping_iterator_t

    Get iterator pointing to the beginning of lvar_mapping_t.

    @param map (C++: const lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_begin(*args)


def lvar_mapping_end(*args):
    """
    lvar_mapping_end(map) -> lvar_mapping_iterator_t

    Get iterator pointing to the end of lvar_mapping_t.

    @param map (C++: const lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_end(*args)


def lvar_mapping_next(*args):
    """
    lvar_mapping_next(p) -> lvar_mapping_iterator_t

    Move to the next element.

    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_next(*args)


def lvar_mapping_prev(*args):
    """
    lvar_mapping_prev(p) -> lvar_mapping_iterator_t

    Move to the previous element.

    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_prev(*args)


def lvar_mapping_first(*args):
    """
    lvar_mapping_first(p) -> lvar_locator_t

    Get reference to the current map key.

    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_first(*args)


def lvar_mapping_second(*args):
    """
    lvar_mapping_second(p) -> lvar_locator_t

    Get reference to the current map value.

    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_second(*args)


def lvar_mapping_find(*args):
    """
    lvar_mapping_find(map, key) -> lvar_mapping_iterator_t

    Find the specified key in lvar_mapping_t.

    @param map (C++: const lvar_mapping_t *)
    @param key (C++: const lvar_locator_t &)
    """
    return _ida_hexrays.lvar_mapping_find(*args)


def lvar_mapping_insert(*args):
    """
    lvar_mapping_insert(map, key, val) -> lvar_mapping_iterator_t

    Insert new ( 'lvar_locator_t' , 'lvar_locator_t' ) pair into
    lvar_mapping_t.

    @param map (C++: lvar_mapping_t *)
    @param key (C++: const lvar_locator_t &)
    @param val (C++: const lvar_locator_t &)
    """
    return _ida_hexrays.lvar_mapping_insert(*args)


def lvar_mapping_erase(*args):
    """
    lvar_mapping_erase(map, p)

    Erase current element from lvar_mapping_t.

    @param map (C++: lvar_mapping_t *)
    @param p (C++: lvar_mapping_iterator_t)
    """
    return _ida_hexrays.lvar_mapping_erase(*args)


def lvar_mapping_clear(*args):
    """
    lvar_mapping_clear(map)

    Clear lvar_mapping_t.

    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_clear(*args)


def lvar_mapping_size(*args):
    """
    lvar_mapping_size(map) -> size_t

    Get size of lvar_mapping_t.

    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_size(*args)


def lvar_mapping_free(*args):
    """
    lvar_mapping_free(map)

    Delete lvar_mapping_t instance.

    @param map (C++: lvar_mapping_t *)
    """
    return _ida_hexrays.lvar_mapping_free(*args)


def lvar_mapping_new(*args):
    """
    lvar_mapping_new() -> lvar_mapping_t

    Create a new lvar_mapping_t instance.
    """
    return _ida_hexrays.lvar_mapping_new(*args)
# SWIG boilerplate: register the Python proxy class for
# udcall_map_iterator_t with the _ida_hexrays C extension.
udcall_map_iterator_t_swigregister = _ida_hexrays.udcall_map_iterator_t_swigregister
udcall_map_iterator_t_swigregister(udcall_map_iterator_t)
# ---------------------------------------------------------------------------
# SWIG-generated accessors for udcall_map_t (a C++ map from ea_t addresses
# to udcall_t user-defined call descriptions).  Thin wrappers that forward
# unchanged to the _ida_hexrays C extension.
# ---------------------------------------------------------------------------

def udcall_map_begin(*args):
    """
    udcall_map_begin(map) -> udcall_map_iterator_t

    Get iterator pointing to the beginning of udcall_map_t.

    @param map (C++: const udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_begin(*args)


def udcall_map_end(*args):
    """
    udcall_map_end(map) -> udcall_map_iterator_t

    Get iterator pointing to the end of udcall_map_t.

    @param map (C++: const udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_end(*args)


def udcall_map_next(*args):
    """
    udcall_map_next(p) -> udcall_map_iterator_t

    Move to the next element.

    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_next(*args)


def udcall_map_prev(*args):
    """
    udcall_map_prev(p) -> udcall_map_iterator_t

    Move to the previous element.

    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_prev(*args)


def udcall_map_first(*args):
    """
    udcall_map_first(p) -> ea_t const &

    Get reference to the current map key.

    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_first(*args)


def udcall_map_second(*args):
    """
    udcall_map_second(p) -> udcall_t

    Get reference to the current map value.

    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_second(*args)


def udcall_map_find(*args):
    """
    udcall_map_find(map, key) -> udcall_map_iterator_t

    Find the specified key in udcall_map_t.

    @param map (C++: const udcall_map_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.udcall_map_find(*args)


def udcall_map_insert(*args):
    """
    udcall_map_insert(map, key, val) -> udcall_map_iterator_t

    Insert new (ea_t, 'udcall_t' ) pair into udcall_map_t.

    @param map (C++: udcall_map_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const udcall_t &)
    """
    return _ida_hexrays.udcall_map_insert(*args)


def udcall_map_erase(*args):
    """
    udcall_map_erase(map, p)

    Erase current element from udcall_map_t.

    @param map (C++: udcall_map_t *)
    @param p (C++: udcall_map_iterator_t)
    """
    return _ida_hexrays.udcall_map_erase(*args)


def udcall_map_clear(*args):
    """
    udcall_map_clear(map)

    Clear udcall_map_t.

    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_clear(*args)


def udcall_map_size(*args):
    """
    udcall_map_size(map) -> size_t

    Get size of udcall_map_t.

    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_size(*args)


def udcall_map_free(*args):
    """
    udcall_map_free(map)

    Delete udcall_map_t instance.

    @param map (C++: udcall_map_t *)
    """
    return _ida_hexrays.udcall_map_free(*args)


def udcall_map_new(*args):
    """
    udcall_map_new() -> udcall_map_t *

    Create a new udcall_map_t instance.
    """
    return _ida_hexrays.udcall_map_new(*args)
# SWIG boilerplate: register the Python proxy class for
# user_cmts_iterator_t with the _ida_hexrays C extension.
user_cmts_iterator_t_swigregister = _ida_hexrays.user_cmts_iterator_t_swigregister
user_cmts_iterator_t_swigregister(user_cmts_iterator_t)
# ---------------------------------------------------------------------------
# SWIG-generated accessors for user_cmts_t (a C++ map from treeloc_t ctree
# locations to citem_cmt_t user comments).  Thin wrappers that forward
# unchanged to the _ida_hexrays C extension.
# ---------------------------------------------------------------------------

def user_cmts_begin(*args):
    """
    user_cmts_begin(map) -> user_cmts_iterator_t

    Get iterator pointing to the beginning of user_cmts_t.

    @param map (C++: const user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_begin(*args)


def user_cmts_end(*args):
    """
    user_cmts_end(map) -> user_cmts_iterator_t

    Get iterator pointing to the end of user_cmts_t.

    @param map (C++: const user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_end(*args)


def user_cmts_next(*args):
    """
    user_cmts_next(p) -> user_cmts_iterator_t

    Move to the next element.

    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_next(*args)


def user_cmts_prev(*args):
    """
    user_cmts_prev(p) -> user_cmts_iterator_t

    Move to the previous element.

    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_prev(*args)


def user_cmts_first(*args):
    """
    user_cmts_first(p) -> treeloc_t

    Get reference to the current map key.

    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_first(*args)


def user_cmts_second(*args):
    """
    user_cmts_second(p) -> citem_cmt_t

    Get reference to the current map value.

    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_second(*args)


def user_cmts_find(*args):
    """
    user_cmts_find(map, key) -> user_cmts_iterator_t

    Find the specified key in user_cmts_t.

    @param map (C++: const user_cmts_t *)
    @param key (C++: const treeloc_t &)
    """
    return _ida_hexrays.user_cmts_find(*args)


def user_cmts_insert(*args):
    """
    user_cmts_insert(map, key, val) -> user_cmts_iterator_t

    Insert new ( 'treeloc_t' , 'citem_cmt_t' ) pair into user_cmts_t.

    @param map (C++: user_cmts_t *)
    @param key (C++: const treeloc_t &)
    @param val (C++: const citem_cmt_t &)
    """
    return _ida_hexrays.user_cmts_insert(*args)


def user_cmts_erase(*args):
    """
    user_cmts_erase(map, p)

    Erase current element from user_cmts_t.

    @param map (C++: user_cmts_t *)
    @param p (C++: user_cmts_iterator_t)
    """
    return _ida_hexrays.user_cmts_erase(*args)


def user_cmts_clear(*args):
    """
    user_cmts_clear(map)

    Clear user_cmts_t.

    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_clear(*args)


def user_cmts_size(*args):
    """
    user_cmts_size(map) -> size_t

    Get size of user_cmts_t.

    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_size(*args)


def user_cmts_free(*args):
    """
    user_cmts_free(map)

    Delete user_cmts_t instance.

    @param map (C++: user_cmts_t *)
    """
    return _ida_hexrays.user_cmts_free(*args)


def user_cmts_new(*args):
    """
    user_cmts_new() -> user_cmts_t

    Create a new user_cmts_t instance.
    """
    return _ida_hexrays.user_cmts_new(*args)
# SWIG boilerplate: register the Python proxy class for
# user_iflags_iterator_t with the _ida_hexrays C extension.
user_iflags_iterator_t_swigregister = _ida_hexrays.user_iflags_iterator_t_swigregister
user_iflags_iterator_t_swigregister(user_iflags_iterator_t)
# ---------------------------------------------------------------------------
# SWIG-generated accessors for user_iflags_t (a C++ map from citem_locator_t
# keys to int32 item-flag values).  Thin wrappers that forward unchanged to
# the _ida_hexrays C extension.
# NOTE(review): unlike the other map-accessor families in this file, no
# user_iflags_second() wrapper appears here — presumably the value accessor
# is provided elsewhere; verify against the rest of the generated module.
# ---------------------------------------------------------------------------

def user_iflags_begin(*args):
    """
    user_iflags_begin(map) -> user_iflags_iterator_t

    Get iterator pointing to the beginning of user_iflags_t.

    @param map (C++: const user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_begin(*args)


def user_iflags_end(*args):
    """
    user_iflags_end(map) -> user_iflags_iterator_t

    Get iterator pointing to the end of user_iflags_t.

    @param map (C++: const user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_end(*args)


def user_iflags_next(*args):
    """
    user_iflags_next(p) -> user_iflags_iterator_t

    Move to the next element.

    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_next(*args)


def user_iflags_prev(*args):
    """
    user_iflags_prev(p) -> user_iflags_iterator_t

    Move to the previous element.

    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_prev(*args)


def user_iflags_first(*args):
    """
    user_iflags_first(p) -> citem_locator_t

    Get reference to the current map key.

    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_first(*args)


def user_iflags_find(*args):
    """
    user_iflags_find(map, key) -> user_iflags_iterator_t

    Find the specified key in user_iflags_t.

    @param map (C++: const user_iflags_t *)
    @param key (C++: const citem_locator_t &)
    """
    return _ida_hexrays.user_iflags_find(*args)


def user_iflags_insert(*args):
    """
    user_iflags_insert(map, key, val) -> user_iflags_iterator_t

    Insert new ( 'citem_locator_t' , int32) pair into user_iflags_t.

    @param map (C++: user_iflags_t *)
    @param key (C++: const citem_locator_t &)
    @param val (C++: const int32 &)
    """
    return _ida_hexrays.user_iflags_insert(*args)


def user_iflags_erase(*args):
    """
    user_iflags_erase(map, p)

    Erase current element from user_iflags_t.

    @param map (C++: user_iflags_t *)
    @param p (C++: user_iflags_iterator_t)
    """
    return _ida_hexrays.user_iflags_erase(*args)


def user_iflags_clear(*args):
    """
    user_iflags_clear(map)

    Clear user_iflags_t.

    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_clear(*args)


def user_iflags_size(*args):
    """
    user_iflags_size(map) -> size_t

    Get size of user_iflags_t.

    @param map (C++: user_iflags_t *)
    """
    return _ida_hexrays.user_iflags_size(*args)
def user_iflags_free(*args):
"""
user_iflags_free(map)
Delete user_iflags_t instance.
@param map (C++: user_iflags_t *)
"""
return _ida_hexrays.user_iflags_free(*args)
def user_iflags_new(*args):
"""
user_iflags_new() -> user_iflags_t
Create a new user_iflags_t instance.
"""
return _ida_hexrays.user_iflags_new(*args)
# Register the SWIG proxy class for user_unions iterators with the runtime.
user_unions_iterator_t_swigregister = _ida_hexrays.user_unions_iterator_t_swigregister
user_unions_iterator_t_swigregister(user_unions_iterator_t)

# SWIG-generated flat accessors for user_unions_t (ea_t -> intvec_t map of
# user-selected union members); all delegate to _ida_hexrays.
def user_unions_begin(*args):
    """
    user_unions_begin(map) -> user_unions_iterator_t
    Get iterator pointing to the beginning of user_unions_t.
    @param map (C++: const user_unions_t *)
    """
    return _ida_hexrays.user_unions_begin(*args)

def user_unions_end(*args):
    """
    user_unions_end(map) -> user_unions_iterator_t
    Get iterator pointing to the end of user_unions_t.
    @param map (C++: const user_unions_t *)
    """
    return _ida_hexrays.user_unions_end(*args)

def user_unions_next(*args):
    """
    user_unions_next(p) -> user_unions_iterator_t
    Move to the next element.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_next(*args)

def user_unions_prev(*args):
    """
    user_unions_prev(p) -> user_unions_iterator_t
    Move to the previous element.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_prev(*args)

def user_unions_first(*args):
    """
    user_unions_first(p) -> ea_t const &
    Get reference to the current map key.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_first(*args)

def user_unions_second(*args):
    """
    user_unions_second(p) -> intvec_t
    Get reference to the current map value.
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_second(*args)

def user_unions_find(*args):
    """
    user_unions_find(map, key) -> user_unions_iterator_t
    Find the specified key in user_unions_t.
    @param map (C++: const user_unions_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.user_unions_find(*args)

def user_unions_insert(*args):
    """
    user_unions_insert(map, key, val) -> user_unions_iterator_t
    Insert new (ea_t, intvec_t) pair into user_unions_t.
    @param map (C++: user_unions_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const intvec_t &)
    """
    return _ida_hexrays.user_unions_insert(*args)

def user_unions_erase(*args):
    """
    user_unions_erase(map, p)
    Erase current element from user_unions_t.
    @param map (C++: user_unions_t *)
    @param p (C++: user_unions_iterator_t)
    """
    return _ida_hexrays.user_unions_erase(*args)

def user_unions_clear(*args):
    """
    user_unions_clear(map)
    Clear user_unions_t.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_clear(*args)

def user_unions_size(*args):
    """
    user_unions_size(map) -> size_t
    Get size of user_unions_t.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_size(*args)

def user_unions_free(*args):
    """
    user_unions_free(map)
    Delete user_unions_t instance.
    @param map (C++: user_unions_t *)
    """
    return _ida_hexrays.user_unions_free(*args)

def user_unions_new(*args):
    """
    user_unions_new() -> user_unions_t
    Create a new user_unions_t instance.
    """
    return _ida_hexrays.user_unions_new(*args)
# Register the SWIG proxy class for user_labels iterators with the runtime.
user_labels_iterator_t_swigregister = _ida_hexrays.user_labels_iterator_t_swigregister
user_labels_iterator_t_swigregister(user_labels_iterator_t)

# SWIG-generated flat accessors for user_labels_t (int -> qstring map of
# user-defined labels); all delegate to _ida_hexrays.
def user_labels_begin(*args):
    """
    user_labels_begin(map) -> user_labels_iterator_t
    Get iterator pointing to the beginning of user_labels_t.
    @param map (C++: const user_labels_t *)
    """
    return _ida_hexrays.user_labels_begin(*args)

def user_labels_end(*args):
    """
    user_labels_end(map) -> user_labels_iterator_t
    Get iterator pointing to the end of user_labels_t.
    @param map (C++: const user_labels_t *)
    """
    return _ida_hexrays.user_labels_end(*args)

def user_labels_next(*args):
    """
    user_labels_next(p) -> user_labels_iterator_t
    Move to the next element.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_next(*args)

def user_labels_prev(*args):
    """
    user_labels_prev(p) -> user_labels_iterator_t
    Move to the previous element.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_prev(*args)

def user_labels_first(*args):
    """
    user_labels_first(p) -> int const &
    Get reference to the current map key.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_first(*args)

def user_labels_second(*args):
    """
    user_labels_second(p) -> qstring &
    Get reference to the current map value.
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_second(*args)

def user_labels_find(*args):
    """
    user_labels_find(map, key) -> user_labels_iterator_t
    Find the specified key in user_labels_t.
    @param map (C++: const user_labels_t *)
    @param key (C++: const int &)
    """
    return _ida_hexrays.user_labels_find(*args)

def user_labels_insert(*args):
    """
    user_labels_insert(map, key, val) -> user_labels_iterator_t
    Insert new (int, qstring) pair into user_labels_t.
    @param map (C++: user_labels_t *)
    @param key (C++: const int &)
    @param val (C++: const qstring &)
    """
    return _ida_hexrays.user_labels_insert(*args)

def user_labels_erase(*args):
    """
    user_labels_erase(map, p)
    Erase current element from user_labels_t.
    @param map (C++: user_labels_t *)
    @param p (C++: user_labels_iterator_t)
    """
    return _ida_hexrays.user_labels_erase(*args)

def user_labels_clear(*args):
    """
    user_labels_clear(map)
    Clear user_labels_t.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_clear(*args)

def user_labels_size(*args):
    """
    user_labels_size(map) -> size_t
    Get size of user_labels_t.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_size(*args)

def user_labels_free(*args):
    """
    user_labels_free(map)
    Delete user_labels_t instance.
    @param map (C++: user_labels_t *)
    """
    return _ida_hexrays.user_labels_free(*args)

def user_labels_new(*args):
    """
    user_labels_new() -> user_labels_t
    Create a new user_labels_t instance.
    """
    return _ida_hexrays.user_labels_new(*args)
# Register the SWIG proxy class for eamap iterators with the runtime.
eamap_iterator_t_swigregister = _ida_hexrays.eamap_iterator_t_swigregister
eamap_iterator_t_swigregister(eamap_iterator_t)

# SWIG-generated flat accessors for eamap_t (ea_t -> cinsnptrvec_t map);
# all delegate to _ida_hexrays.
def eamap_begin(*args):
    """
    eamap_begin(map) -> eamap_iterator_t
    Get iterator pointing to the beginning of eamap_t.
    @param map (C++: const eamap_t *)
    """
    return _ida_hexrays.eamap_begin(*args)

def eamap_end(*args):
    """
    eamap_end(map) -> eamap_iterator_t
    Get iterator pointing to the end of eamap_t.
    @param map (C++: const eamap_t *)
    """
    return _ida_hexrays.eamap_end(*args)

def eamap_next(*args):
    """
    eamap_next(p) -> eamap_iterator_t
    Move to the next element.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_next(*args)

def eamap_prev(*args):
    """
    eamap_prev(p) -> eamap_iterator_t
    Move to the previous element.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_prev(*args)

def eamap_first(*args):
    """
    eamap_first(p) -> ea_t const &
    Get reference to the current map key.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_first(*args)

def eamap_second(*args):
    """
    eamap_second(p) -> cinsnptrvec_t
    Get reference to the current map value.
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_second(*args)

def eamap_find(*args):
    """
    eamap_find(map, key) -> eamap_iterator_t
    Find the specified key in eamap_t.
    @param map (C++: const eamap_t *)
    @param key (C++: const ea_t &)
    """
    return _ida_hexrays.eamap_find(*args)

def eamap_insert(*args):
    """
    eamap_insert(map, key, val) -> eamap_iterator_t
    Insert new (ea_t, cinsnptrvec_t) pair into eamap_t.
    @param map (C++: eamap_t *)
    @param key (C++: const ea_t &)
    @param val (C++: const cinsnptrvec_t &)
    """
    return _ida_hexrays.eamap_insert(*args)

def eamap_erase(*args):
    """
    eamap_erase(map, p)
    Erase current element from eamap_t.
    @param map (C++: eamap_t *)
    @param p (C++: eamap_iterator_t)
    """
    return _ida_hexrays.eamap_erase(*args)

def eamap_clear(*args):
    """
    eamap_clear(map)
    Clear eamap_t.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_clear(*args)

def eamap_size(*args):
    """
    eamap_size(map) -> size_t
    Get size of eamap_t.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_size(*args)

def eamap_free(*args):
    """
    eamap_free(map)
    Delete eamap_t instance.
    @param map (C++: eamap_t *)
    """
    return _ida_hexrays.eamap_free(*args)

def eamap_new(*args):
    """
    eamap_new() -> eamap_t
    Create a new eamap_t instance.
    """
    return _ida_hexrays.eamap_new(*args)
# Register the SWIG proxy class for boundaries iterators with the runtime.
boundaries_iterator_t_swigregister = _ida_hexrays.boundaries_iterator_t_swigregister
boundaries_iterator_t_swigregister(boundaries_iterator_t)

# SWIG-generated flat accessors for boundaries_t (cinsn_t -> rangeset_t
# map); all delegate to _ida_hexrays. Note: this family exposes no
# find/insert wrappers in this module.
def boundaries_begin(*args):
    """
    boundaries_begin(map) -> boundaries_iterator_t
    Get iterator pointing to the beginning of boundaries_t.
    @param map (C++: const boundaries_t *)
    """
    return _ida_hexrays.boundaries_begin(*args)

def boundaries_end(*args):
    """
    boundaries_end(map) -> boundaries_iterator_t
    Get iterator pointing to the end of boundaries_t.
    @param map (C++: const boundaries_t *)
    """
    return _ida_hexrays.boundaries_end(*args)

def boundaries_next(*args):
    """
    boundaries_next(p) -> boundaries_iterator_t
    Move to the next element.
    @param p (C++: boundaries_iterator_t)
    """
    return _ida_hexrays.boundaries_next(*args)

def boundaries_prev(*args):
    """
    boundaries_prev(p) -> boundaries_iterator_t
    Move to the previous element.
    @param p (C++: boundaries_iterator_t)
    """
    return _ida_hexrays.boundaries_prev(*args)

def boundaries_first(*args):
    """
    boundaries_first(p) -> cinsn_t
    Get reference to the current map key.
    @param p (C++: boundaries_iterator_t)
    """
    return _ida_hexrays.boundaries_first(*args)

def boundaries_second(*args):
    """
    boundaries_second(p) -> rangeset_t
    Get reference to the current map value.
    @param p (C++: boundaries_iterator_t)
    """
    return _ida_hexrays.boundaries_second(*args)

def boundaries_erase(*args):
    """
    boundaries_erase(map, p)
    Erase current element from boundaries_t.
    @param map (C++: boundaries_t *)
    @param p (C++: boundaries_iterator_t)
    """
    return _ida_hexrays.boundaries_erase(*args)

def boundaries_clear(*args):
    """
    boundaries_clear(map)
    Clear boundaries_t.
    @param map (C++: boundaries_t *)
    """
    return _ida_hexrays.boundaries_clear(*args)

def boundaries_size(*args):
    """
    boundaries_size(map) -> size_t
    Get size of boundaries_t.
    @param map (C++: boundaries_t *)
    """
    return _ida_hexrays.boundaries_size(*args)

def boundaries_free(*args):
    """
    boundaries_free(map)
    Delete boundaries_t instance.
    @param map (C++: boundaries_t *)
    """
    return _ida_hexrays.boundaries_free(*args)

def boundaries_new(*args):
    """
    boundaries_new() -> boundaries_t
    Create a new boundaries_t instance.
    """
    return _ida_hexrays.boundaries_new(*args)
# Register the SWIG proxy class for block_chains iterators with the runtime.
block_chains_iterator_t_swigregister = _ida_hexrays.block_chains_iterator_t_swigregister
block_chains_iterator_t_swigregister(block_chains_iterator_t)

# SWIG-generated flat accessors for block_chains_t (a set of chain_t
# values, hence "get" instead of the first/second key/value pair);
# all delegate to _ida_hexrays.
def block_chains_begin(*args):
    """
    block_chains_begin(set) -> block_chains_iterator_t
    Get iterator pointing to the beginning of 'block_chains_t' .
    @param set (C++: const block_chains_t *)
    """
    return _ida_hexrays.block_chains_begin(*args)

def block_chains_end(*args):
    """
    block_chains_end(set) -> block_chains_iterator_t
    Get iterator pointing to the end of 'block_chains_t' .
    @param set (C++: const block_chains_t *)
    """
    return _ida_hexrays.block_chains_end(*args)

def block_chains_next(*args):
    """
    block_chains_next(p) -> block_chains_iterator_t
    Move to the next element.
    @param p (C++: block_chains_iterator_t)
    """
    return _ida_hexrays.block_chains_next(*args)

def block_chains_prev(*args):
    """
    block_chains_prev(p) -> block_chains_iterator_t
    Move to the previous element.
    @param p (C++: block_chains_iterator_t)
    """
    return _ida_hexrays.block_chains_prev(*args)

def block_chains_get(*args):
    """
    block_chains_get(p) -> chain_t
    Get reference to the current set value.
    @param p (C++: block_chains_iterator_t)
    """
    return _ida_hexrays.block_chains_get(*args)

def block_chains_find(*args):
    """
    block_chains_find(set, val) -> block_chains_iterator_t
    Find the specified key in set 'block_chains_t' .
    @param set (C++: const block_chains_t *)
    @param val (C++: const chain_t &)
    """
    return _ida_hexrays.block_chains_find(*args)

def block_chains_insert(*args):
    """
    block_chains_insert(set, val) -> block_chains_iterator_t
    Insert new ( 'chain_t' ) into set 'block_chains_t' .
    @param set (C++: block_chains_t *)
    @param val (C++: const chain_t &)
    """
    return _ida_hexrays.block_chains_insert(*args)

def block_chains_erase(*args):
    """
    block_chains_erase(set, p)
    Erase current element from 'block_chains_t' .
    @param set (C++: block_chains_t *)
    @param p (C++: block_chains_iterator_t)
    """
    return _ida_hexrays.block_chains_erase(*args)

def block_chains_clear(*args):
    """
    block_chains_clear(set)
    Clear 'block_chains_t' .
    @param set (C++: block_chains_t *)
    """
    return _ida_hexrays.block_chains_clear(*args)

def block_chains_size(*args):
    """
    block_chains_size(set) -> size_t
    Get size of 'block_chains_t' .
    @param set (C++: block_chains_t *)
    """
    return _ida_hexrays.block_chains_size(*args)

def block_chains_free(*args):
    """
    block_chains_free(set)
    Delete 'block_chains_t' instance.
    @param set (C++: block_chains_t *)
    """
    return _ida_hexrays.block_chains_free(*args)

def block_chains_new(*args):
    """
    block_chains_new() -> block_chains_t
    Create a new 'block_chains_t' instance.
    """
    return _ida_hexrays.block_chains_new(*args)
#<pycode(py_hexrays)>
# Hand-written Python glue appended to the SWIG output.
import ida_funcs

# Render decompiler failures as "<hex ea>: <description>".
hexrays_failure_t.__str__ = lambda self: str("%x: %s" % (self.errea, self.desc()))
# ---------------------------------------------------------------------
# Renamings
# Backward-compatible aliases for renamed API entry points.
is_allowed_on_small_struni = accepts_small_udts
is_small_struni = is_small_udt
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
def decompile(ea, hf=None, flags=0):
    """
    Decompile the function at ``ea``.

    @param ea: an address inside the function, or an ``ida_funcs.func_t``
               object for the function itself.
    @param hf: optional ``hexrays_failure_t`` that receives failure
               details; a fresh one is created when omitted.
    @param flags: flags forwarded to the native ``decompile_func()``.
    @return: decompiled function pointer, or ``None`` when ``ea`` does not
             belong to any function.
    @raise RuntimeError: when ``ea`` is neither an address nor a func_t.
    @raise DecompilationFailure: when the decompiler itself fails; the
             failure object carries the error location and description.
    """
    if isinstance(ea, (int, long)):
        func = ida_funcs.get_func(ea)
        # No function at this address: keep the historical behavior of
        # returning None rather than raising.
        if not func:
            return None
    elif isinstance(ea, ida_funcs.func_t):
        # isinstance (instead of an exact type() comparison) also accepts
        # subclasses of func_t.
        func = ea
    else:
        # The accepted object type is func_t (the message used to wrongly
        # say cfunc_t).
        raise RuntimeError('arg 1 of decompile expects either ea_t or func_t argument')
    if hf is None:
        hf = hexrays_failure_t()
    ptr = _ida_hexrays.decompile_func(func, hf, flags)
    if ptr.__deref__() is None:
        raise DecompilationFailure(hf)
    return ptr
# ---------------------------------------------------------------------
# stringify all string types
#qtype.__str__ = qtype.c_str
#qstring.__str__ = qstring.c_str
#citem_cmt_t.__str__ = citem_cmt_t.c_str
# ---------------------------------------------------------------------
# listify all list types
import ida_idaapi
# Give the SWIG vector wrappers Python list semantics (len, iteration,
# indexing, ...).
ida_idaapi._listify_types(
    cinsnptrvec_t,
    ctree_items_t,
    qvector_lvar_t,
    qvector_carg_t,
    qvector_ccase_t,
    hexwarns_t,
    history_t,
    lvar_saved_infos_t,
    ui_stroff_ops_t)
def citem_to_specific_type(self):
    """
    cast the citem_t object to its more specific type, either cexpr_t or cinsn_t.
    """
    op = self.op
    if cot_empty <= op <= cot_last:
        return self.cexpr
    if cit_empty <= op < cit_end:
        return self.cinsn
    raise RuntimeError('unknown op type %s' % (repr(op), ))
citem_t.to_specific_type = property(citem_to_specific_type)

"""
array used for translating cinsn_t->op type to their names.
"""
cinsn_t.op_to_typename = {}
# Collect every cit_* statement opcode exported by the native module and
# map its numeric value to the bare name (e.g. cit_if -> "if").
for k in dir(_ida_hexrays):
    if k.startswith('cit_'):
        cinsn_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:]

"""
array used for translating cexpr_t->op type to their names.
"""
cexpr_t.op_to_typename = {}
# Same mapping for the cot_* expression opcodes.
for k in dir(_ida_hexrays):
    if k.startswith('cot_'):
        cexpr_t.op_to_typename[getattr(_ida_hexrays, k)] = k[4:]

# Expose the translated opcode name as .opname on both node types.
cinsn_t.opname = property(property_op_to_typename)
cexpr_t.opname = property(property_op_to_typename)
def cexpr_operands(self):
    """
    return a dictionary with the operands of a cexpr_t.
    """
    # Binary operators (comma/assignment family, logical/arithmetic
    # family, array indexing) carry two operands.
    if self.op >= cot_comma and self.op <= cot_asgumod or \
       self.op >= cot_lor and self.op <= cot_fdiv or \
       self.op == cot_idx:
        return {'x': self.x, 'y': self.y}
    # Ternary ?: carries three.
    elif self.op == cot_tern:
        return {'x': self.x, 'y': self.y, 'z': self.z}
    # Unary operators carry a single operand.
    elif self.op in [cot_fneg, cot_neg, cot_sizeof] or \
         self.op >= cot_lnot and self.op <= cot_predec:
        return {'x': self.x}
    elif self.op == cot_cast:
        return {'type': self.type, 'x': self.x}
    elif self.op == cot_call:
        # x is the callee expression, a the argument list.
        return {'x': self.x, 'a': self.a}
    elif self.op in [cot_memref, cot_memptr]:
        # m is the member offset within the referenced structure.
        return {'x': self.x, 'm': self.m}
    # Leaf nodes: constants, strings, globals, local variables, helpers.
    elif self.op == cot_num:
        return {'n': self.n}
    elif self.op == cot_fnum:
        return {'fpc': self.fpc}
    elif self.op == cot_str:
        return {'string': self.string}
    elif self.op == cot_obj:
        return {'obj_ea': self.obj_ea}
    elif self.op == cot_var:
        return {'v': self.v}
    elif self.op == cot_helper:
        return {'helper': self.helper}
    raise RuntimeError('unknown op type %s' % self.opname)

cexpr_t.operands = property(cexpr_operands)
def cinsn_details(self):
    """
    Return the detail object for this instruction, chosen by its ``op``
    member: one of the cblock_t, cif_t, etc. objects for structured
    statements, the instruction itself for 'empty', and ``None`` for
    'break'/'continue' which carry no details.
    """
    if self.op not in self.op_to_typename:
        raise RuntimeError('unknown item->op type')
    kind = self.opname
    if kind in ('break', 'continue'):
        return None
    return self if kind == 'empty' else getattr(self, 'c' + kind)
cinsn_t.details = property(cinsn_details)

# Make cblock_t behave like a Python sequence of instructions.
cblock_t.__iter__ = cblock_iter
cblock_t.__len__ = cblock_t.size
# cblock.find(cinsn_t) -> returns the iterator positioned at the given item
cblock_t.find = cblock_find
# cblock.index(cinsn_t) -> returns the index of the given item
cblock_t.index = cblock_index
# cblock.at(int) -> returns the item at the given index
cblock_t.at = cblock_at
# cblock.remove(cinsn_t)
cblock_t.remove = cblock_remove
# cblock.insert(index, cinsn_t)
cblock_t.insert = cblock_insert

# Printing a cfuncptr_t prints the dereferenced cfunc_t.
cfuncptr_t.__str__ = lambda self: str(self.__deref__())

import ida_typeinf
def cfunc_type(self):
    """
    Get the function's return type tinfo_t object.
    """
    tinfo = ida_typeinf.tinfo_t()
    if not self.get_func_type(tinfo):
        return None
    return tinfo
# Convenience accessors, mirrored onto both cfunc_t and the ref-counted
# cfuncptr_t wrapper (which dereferences first).
cfunc_t.type = property(cfunc_type)
cfuncptr_t.type = property(lambda self: self.__deref__().type)
cfunc_t.arguments = property(lambda self: [o for o in self.lvars if o.is_arg_var])
cfuncptr_t.arguments = property(lambda self: self.__deref__().arguments)
cfunc_t.lvars = property(cfunc_t.get_lvars)
cfuncptr_t.lvars = property(lambda self: self.__deref__().lvars)
cfunc_t.warnings = property(cfunc_t.get_warnings)
cfuncptr_t.warnings = property(lambda self: self.__deref__().warnings)
cfunc_t.pseudocode = property(cfunc_t.get_pseudocode)
cfuncptr_t.pseudocode = property(lambda self: self.__deref__().get_pseudocode())
cfunc_t.eamap = property(cfunc_t.get_eamap)
cfuncptr_t.eamap = property(lambda self: self.__deref__().get_eamap())
cfunc_t.boundaries = property(cfunc_t.get_boundaries)
cfuncptr_t.boundaries = property(lambda self: self.__deref__().get_boundaries())

#pragma SWIG nowarn=+503
# Turn the lvar_t flag getter methods into read-only properties of the
# same name (shadowing the original methods).
lvar_t.used = property(lvar_t.used)
lvar_t.typed = property(lvar_t.typed)
lvar_t.mreg_done = property(lvar_t.mreg_done)
lvar_t.has_nice_name = property(lvar_t.has_nice_name)
lvar_t.is_unknown_width = property(lvar_t.is_unknown_width)
lvar_t.has_user_info = property(lvar_t.has_user_info)
lvar_t.has_user_name = property(lvar_t.has_user_name)
lvar_t.has_user_type = property(lvar_t.has_user_type)
lvar_t.is_result_var = property(lvar_t.is_result_var)
lvar_t.is_arg_var = property(lvar_t.is_arg_var)
lvar_t.is_fake_var = property(lvar_t.is_fake_var)
lvar_t.is_overlapped_var = property(lvar_t.is_overlapped_var)
lvar_t.is_floating_var = property(lvar_t.is_floating_var)
lvar_t.is_spoiled_var = property(lvar_t.is_spoiled_var)
lvar_t.is_mapdst_var = property(lvar_t.is_mapdst_var)

# dictify all dict-like types
#_map_as_dict(user_labels_t, 'user_labels', (int, long), qstring)
_map_as_dict(user_cmts_t, 'user_cmts', treeloc_t, citem_cmt_t)
_map_as_dict(user_numforms_t, 'user_numforms', operand_locator_t, number_format_t)
_map_as_dict(user_iflags_t, 'user_iflags', citem_locator_t, int)
import ida_pro
_map_as_dict(user_unions_t, 'user_unions', (int, long), ida_pro.intvec_t)
_map_as_dict(eamap_t, 'eamap', long, cinsnptrvec_t)
import ida_range
_map_as_dict(boundaries_t, 'boundaries', cinsn_t, ida_range.rangeset_t)
#
# Object ownership
#
# ----------------
def install_hexrays_callback(callback):
    "Deprecated. Please use Hexrays_Hooks instead"
    hooks = __cbhooks_t(callback)
    hooks.hook()
    return True
def remove_hexrays_callback(callback):
    "Deprecated. Please use Hexrays_Hooks instead"
    # Unhook and drop the first registered wrapper for this callback.
    match = next((h for h in __cbhooks_t.instances if h.callback == callback), None)
    if match is None:
        return 0
    match.unhook()
    __cbhooks_t.instances.remove(match)
    return 1
#</pycode(py_hexrays)>
# Backward-compatibility aliases for pre-7.0 (6.95) API names.
if _BC695:
    get_tform_vdui=get_widget_vdui
    hx_get_tform_vdui=hx_get_widget_vdui
    # The 64-bit magic used to be exposed as two 32-bit halves.
    HEXRAYS_API_MAGIC1=(HEXRAYS_API_MAGIC>>32)
    HEXRAYS_API_MAGIC2=(HEXRAYS_API_MAGIC&0xFFFFFFFF)
| 29.641948 | 603 | 0.637696 |
bcc6795e9da5c859c6308d7dfd37a7f5806dbb41
| 3,714 |
py
|
Python
|
webapp/gen_graphs.py
|
bfitzy2142/NET4901-SP
|
908c13332a5356bd6a59879b8d78af76432b807c
|
[
"MIT"
] | 3 |
2019-08-04T03:09:02.000Z
|
2020-06-08T15:48:36.000Z
|
webapp/gen_graphs.py
|
bfitzy2142/NET4901-SP
|
908c13332a5356bd6a59879b8d78af76432b807c
|
[
"MIT"
] | 3 |
2019-09-06T08:30:21.000Z
|
2020-06-30T03:24:56.000Z
|
webapp/gen_graphs.py
|
bfitzy2142/NET4901-SP-SDLENS
|
908c13332a5356bd6a59879b8d78af76432b807c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
@author: Sam Cook
MySql Parser for graphical presentation
"""
import mysql.connector
import datetime
from mysql.connector import Error
from datetime import datetime, timedelta
import json
| 34.71028 | 72 | 0.53608 |
bcca1a19ecd367ba4725d3ef774b347cae61be62
| 830 |
py
|
Python
|
scqubits/tests/test_fluxqubit.py
|
dmtvanzanten/scqubits
|
d4d8a0f71ac91077594a6173348279aa490ed048
|
[
"BSD-3-Clause"
] | null | null | null |
scqubits/tests/test_fluxqubit.py
|
dmtvanzanten/scqubits
|
d4d8a0f71ac91077594a6173348279aa490ed048
|
[
"BSD-3-Clause"
] | null | null | null |
scqubits/tests/test_fluxqubit.py
|
dmtvanzanten/scqubits
|
d4d8a0f71ac91077594a6173348279aa490ed048
|
[
"BSD-3-Clause"
] | null | null | null |
# test_fluxqubit.py
# meant to be run with 'pytest'
#
# This file is part of scqubits.
#
# Copyright (c) 2019 and later, Jens Koch and Peter Groszkowski
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
############################################################################
import numpy as np
from scqubits import FluxQubit
from scqubits.tests.conftest import StandardTests
| 28.62069 | 76 | 0.622892 |
bcca9310b776373045a4dd0e28575a2063a3d591
| 1,379 |
py
|
Python
|
PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852 |
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371 |
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
PhysicsTools/PatAlgos/python/producersLayer1/pfParticleProducer_cfi.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240 |
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# CMSSW configuration fragment: PAT producer turning particle-flow
# candidates into pat::PFParticle objects.
patPFParticles = cms.EDProducer("PATPFParticleProducer",
    # General configurables
    pfCandidateSource = cms.InputTag("noJet"),
    # MC matching configurables
    addGenMatch = cms.bool(False),
    genParticleMatch = cms.InputTag(""), ## particles source to be used for the MC matching
                                         ## must be an InputTag or VInputTag to a product of
                                         ## type edm::Association<reco::GenParticleCollection>
    embedGenMatch = cms.bool(False), ## embed gen match inside the object instead of storing the ref
    # add user data
    userData = cms.PSet(
      # add custom classes here
      userClasses = cms.PSet(
        src = cms.VInputTag('')
      ),
      # add doubles here
      userFloats = cms.PSet(
        src = cms.VInputTag('')
      ),
      # add ints here
      userInts = cms.PSet(
        src = cms.VInputTag('')
      ),
      # add candidate ptrs here
      userCands = cms.PSet(
        src = cms.VInputTag('')
      ),
      # add "inline" functions here
      userFunctions = cms.vstring(),
      userFunctionLabels = cms.vstring()
    ),
    # Efficiencies
    addEfficiencies = cms.bool(False),
    efficiencies = cms.PSet(),
    # resolution
    addResolutions = cms.bool(False),
    resolutions = cms.PSet(),
)
| 29.340426 | 106 | 0.585207 |
bccb37cf2799cc964344db7c5cf679594dae2889
| 2,252 |
py
|
Python
|
tests/test_api.py
|
ines/spacy-js
|
5b7a86cb0d1099285e01252f7e1d44a36ad9a07f
|
[
"MIT"
] | 141 |
2018-10-27T17:18:54.000Z
|
2022-03-31T11:08:02.000Z
|
tests/test_api.py
|
Fabulabs/spacy-js
|
c7a34298203d26b25f9eb1f6b9eb875faa33d144
|
[
"MIT"
] | 16 |
2018-10-27T21:44:36.000Z
|
2022-01-22T03:01:54.000Z
|
tests/test_api.py
|
Fabulabs/spacy-js
|
c7a34298203d26b25f9eb1f6b9eb875faa33d144
|
[
"MIT"
] | 22 |
2019-01-12T16:38:20.000Z
|
2022-03-14T19:11:38.000Z
|
# coding: utf8
from __future__ import unicode_literals
import pytest
import spacy
import json
from api.server import parse, doc2json, load_model
def test_server_parse(model, text, doc):
    """The server's parse() output must match a direct doc2json() conversion."""
    load_model(model)
    via_api = parse(model, text)
    via_direct = doc2json(doc, model)
    assert json.dumps(via_api, sort_keys=True) == json.dumps(
        via_direct, sort_keys=True
    )
def test_doc2json_doc_tokens(doc, model):
    """doc2json() serializes document-level flags and per-token attributes."""
    data = doc2json(doc, model)
    assert data["model"] == model
    serialized = data["doc"]
    assert serialized["text"] == doc.text
    assert serialized["text_with_ws"] == doc.text_with_ws
    assert serialized["is_tagged"]
    assert serialized["is_parsed"]
    assert serialized["is_sentenced"]
    tokens = data["tokens"]
    assert len(tokens) == len(doc)
    assert tokens[0]["text"] == doc[0].text
    assert tokens[0]["head"] == doc[0].head.i
def test_doc2json_doc_ents(doc, model):
    """Named-entity spans survive the round trip into the JSON payload."""
    data = doc2json(doc, model)
    gold = list(doc.ents)
    assert "ents" in data
    payload = data["ents"]
    assert len(payload) == len(gold)
    assert len(payload) >= 1
    assert payload[0]["start"] == gold[0].start
    assert payload[0]["end"] == gold[0].end
    assert payload[0]["label"] == gold[0].label_
def test_doc2json_doc_sents(doc, model):
    """Sentence boundaries survive the round trip into the JSON payload."""
    data = doc2json(doc, model)
    gold = list(doc.sents)
    assert "sents" in data
    payload = data["sents"]
    assert len(payload) == len(gold)
    assert len(payload) >= 1
    assert payload[0]["start"] == gold[0].start
    assert payload[0]["end"] == gold[0].end
def test_doc2json_doc_noun_chunks(doc, model):
    """Noun-chunk spans survive the round trip into the JSON payload."""
    data = doc2json(doc, model)
    gold = list(doc.noun_chunks)
    assert "noun_chunks" in data
    payload = data["noun_chunks"]
    assert len(payload) == len(gold)
    assert len(payload) >= 1
    assert payload[0]["start"] == gold[0].start
    assert payload[0]["end"] == gold[0].end
| 27.463415 | 68 | 0.654085 |
bcccafa97336dc1ded4587f29664425a01e6d815
| 28,365 |
py
|
Python
|
python/GafferArnold/ArnoldTextureBake.py
|
medubelko/gaffer
|
12c5994c21dcfb8b13b5b86efbcecdcb29202b33
|
[
"BSD-3-Clause"
] | 1 |
2019-12-02T02:31:25.000Z
|
2019-12-02T02:31:25.000Z
|
python/GafferArnold/ArnoldTextureBake.py
|
medubelko/gaffer
|
12c5994c21dcfb8b13b5b86efbcecdcb29202b33
|
[
"BSD-3-Clause"
] | null | null | null |
python/GafferArnold/ArnoldTextureBake.py
|
medubelko/gaffer
|
12c5994c21dcfb8b13b5b86efbcecdcb29202b33
|
[
"BSD-3-Clause"
] | null | null | null |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
import IECoreScene
import Gaffer
import GafferScene
import GafferArnold
import GafferDispatch
import GafferImage
import imath
import inspect
# Register the node class with Cortex's RunTimeTyped system under its
# fully-qualified Gaffer type name.
IECore.registerRunTimeTyped( ArnoldTextureBake, typeName = "GafferArnold::ArnoldTextureBake" )
| 47.196339 | 175 | 0.692649 |
bccd1fa8fe336f245d1474aeb673c6c021c08a1b
| 20,598 |
py
|
Python
|
aea/protocols/generator/common.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
aea/protocols/generator/common.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
aea/protocols/generator/common.py
|
valory-xyz/agents-aea
|
8f38efa96041b0156ed1ae328178e395dbabf2fc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2022 Valory AG
# Copyright 2018-2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""This module contains utility code for generator modules."""
import inspect
import os
import re
import shutil
import subprocess # nosec
import sys
import tempfile
from pathlib import Path
from typing import Tuple
from aea.configurations.base import ProtocolSpecification
from aea.configurations.constants import (
DEFAULT_PROTOCOL_CONFIG_FILE,
PACKAGES,
PROTOCOL_LANGUAGE_JS,
PROTOCOL_LANGUAGE_PYTHON,
)
from aea.configurations.loader import ConfigLoader
from aea.helpers.io import open_file
SPECIFICATION_PRIMITIVE_TYPES = ["pt:bytes", "pt:int", "pt:float", "pt:bool", "pt:str"]
SPECIFICATION_COMPOSITIONAL_TYPES = [
"pt:set",
"pt:list",
"pt:dict",
"pt:union",
"pt:optional",
]
PYTHON_COMPOSITIONAL_TYPES = [
"FrozenSet",
"Tuple",
"Dict",
"Union",
"Optional",
]
MESSAGE_IMPORT = "from aea.protocols.base import Message"
SERIALIZER_IMPORT = "from aea.protocols.base import Serializer"
PATH_TO_PACKAGES = PACKAGES
INIT_FILE_NAME = "__init__.py"
PROTOCOL_YAML_FILE_NAME = DEFAULT_PROTOCOL_CONFIG_FILE
MESSAGE_DOT_PY_FILE_NAME = "message.py"
DIALOGUE_DOT_PY_FILE_NAME = "dialogues.py"
CUSTOM_TYPES_DOT_PY_FILE_NAME = "custom_types.py"
SERIALIZATION_DOT_PY_FILE_NAME = "serialization.py"
PYTHON_TYPE_TO_PROTO_TYPE = {
"bytes": "bytes",
"int": "int32",
"float": "float",
"bool": "bool",
"str": "string",
}
CURRENT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
ISORT_CONFIGURATION_FILE = os.path.join(CURRENT_DIR, "isort.cfg")
ISORT_CLI_ARGS = [
"--settings-path",
ISORT_CONFIGURATION_FILE,
"--quiet",
]
PROTOLINT_CONFIGURATION_FILE_NAME = "protolint.yaml"
PROTOLINT_CONFIGURATION = """lint:
rules:
remove:
- MESSAGE_NAMES_UPPER_CAMEL_CASE
- ENUM_FIELD_NAMES_ZERO_VALUE_END_WITH
- PACKAGE_NAME_LOWER_CASE
- REPEATED_FIELD_NAMES_PLURALIZED
- FIELD_NAMES_LOWER_SNAKE_CASE"""
PROTOLINT_INDENTATION_ERROR_STR = "incorrect indentation style"
PROTOLINT_ERROR_WHITELIST = [PROTOLINT_INDENTATION_ERROR_STR]
def _to_camel_case(text: str) -> str:
"""
Convert a text in snake_case format into the CamelCase format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return "".join(word.title() for word in text.split("_"))
def _camel_case_to_snake_case(text: str) -> str:
"""
Convert a text in CamelCase format into the snake_case format.
:param text: the text to be converted.
:return: The text in CamelCase format.
"""
return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()
def _match_brackets(text: str, index_of_open_bracket: int) -> int:
"""
Give the index of the matching close bracket for the opening bracket at 'index_of_open_bracket' in the input 'text'.
:param text: the text containing the brackets.
:param index_of_open_bracket: the index of the opening bracket.
:return: the index of the matching closing bracket (if any).
:raises SyntaxError if there are no matching closing bracket.
"""
if text[index_of_open_bracket] != "[":
raise SyntaxError(
"Index {} in 'text' is not an open bracket '['. It is {}".format(
index_of_open_bracket,
text[index_of_open_bracket],
)
)
open_bracket_stack = []
for index in range(index_of_open_bracket, len(text)):
if text[index] == "[":
open_bracket_stack.append(text[index])
elif text[index] == "]":
open_bracket_stack.pop()
if not open_bracket_stack:
return index
raise SyntaxError(
"No matching closing bracket ']' for the opening bracket '[' at {} "
+ str(index_of_open_bracket)
)
def _has_matched_brackets(text: str) -> bool:
"""
Evaluate whether every opening bracket '[' in the 'text' has a matching closing bracket ']'.
:param text: the text.
:return: Boolean result, and associated message.
"""
open_bracket_stack = []
for index, _ in enumerate(text):
if text[index] == "[":
open_bracket_stack.append(index)
elif text[index] == "]":
if len(open_bracket_stack) == 0:
return False
open_bracket_stack.pop()
return len(open_bracket_stack) == 0
def _get_sub_types_of_compositional_types(compositional_type: str) -> Tuple[str, ...]:
    """
    Extract the sub-types of compositional types.
    This method handles both specification types (e.g. pt:set[], pt:dict[]) as well as python types (e.g. FrozenSet[], Union[]).
    :param compositional_type: the compositional type string whose sub-types are to be extracted.
    :return: tuple containing all extracted sub-types.
    :raises SyntaxError: if 'compositional_type' does not start with a known
        compositional type name, or its brackets are malformed.
    """
    sub_types_list = list()
    for valid_compositional_type in (
        SPECIFICATION_COMPOSITIONAL_TYPES + PYTHON_COMPOSITIONAL_TYPES
    ):
        if compositional_type.startswith(valid_compositional_type):
            # Text between the outermost '[' and the last ']', e.g.
            # "int, str" for "Dict[int, str]".
            inside_string = compositional_type[
                compositional_type.index("[") + 1 : compositional_type.rindex("]")
            ].strip()
            # Consume 'inside_string' left to right, peeling off one
            # (possibly nested) sub-type per iteration.
            while inside_string != "":
                do_not_add = False
                if inside_string.find(",") == -1:  # No comma; this is the last sub-type
                    provisional_sub_type = inside_string.strip()
                    if (
                        provisional_sub_type == "..."
                    ):  # The sub-string is ... used for Tuple, e.g. Tuple[int, ...]
                        do_not_add = True
                    else:
                        sub_type = provisional_sub_type
                    inside_string = ""
                else:  # There is a comma; this MAY not be the last sub-type
                    sub_string_until_comma = inside_string[
                        : inside_string.index(",")
                    ].strip()
                    if (
                        sub_string_until_comma.find("[") == -1
                    ):  # No open brackets; this is a primitive type and NOT the last sub-type
                        sub_type = sub_string_until_comma
                        inside_string = inside_string[
                            inside_string.index(",") + 1 :
                        ].strip()
                    else:  # There is an open bracket'['; this is a compositional type
                        try:
                            # Find the ']' matching the first '[' so that a
                            # nested sub-type (e.g. "Dict[int, str]") is kept
                            # whole instead of being split at its inner comma.
                            closing_bracket_index = _match_brackets(
                                inside_string, inside_string.index("[")
                            )
                        except SyntaxError:
                            raise SyntaxError(
                                "Bad formatting. No matching close bracket ']' for the open bracket at {}".format(
                                    inside_string[
                                        : inside_string.index("[") + 1
                                    ].strip()
                                )
                            )
                        sub_type = inside_string[: closing_bracket_index + 1].strip()
                        the_rest_of_inside_string = inside_string[
                            closing_bracket_index + 1 :
                        ].strip()
                        if (
                            the_rest_of_inside_string.find(",") == -1
                        ):  # No comma; this is the last sub-type
                            inside_string = the_rest_of_inside_string.strip()
                        else:  # There is a comma; this is not the last sub-type
                            inside_string = the_rest_of_inside_string[
                                the_rest_of_inside_string.index(",") + 1 :
                            ].strip()
                if not do_not_add:
                    sub_types_list.append(sub_type)
            return tuple(sub_types_list)
    raise SyntaxError(
        "{} is not a valid compositional type.".format(compositional_type)
    )
def _union_sub_type_to_protobuf_variable_name(
content_name: str, content_type: str
) -> str:
"""
Given a content of type union, create a variable name for its sub-type for protobuf.
:param content_name: the name of the content
:param content_type: the sub-type of a union type
:return: The variable name
"""
if content_type.startswith("FrozenSet"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "set_of_{}".format(sub_type)
elif content_type.startswith("Tuple"):
sub_type = _get_sub_types_of_compositional_types(content_type)[0]
expanded_type_str = "list_of_{}".format(sub_type)
elif content_type.startswith("Dict"):
sub_type_1 = _get_sub_types_of_compositional_types(content_type)[0]
sub_type_2 = _get_sub_types_of_compositional_types(content_type)[1]
expanded_type_str = "dict_of_{}_{}".format(sub_type_1, sub_type_2)
else:
expanded_type_str = content_type
protobuf_variable_name = "{}_type_{}".format(content_name, expanded_type_str)
return protobuf_variable_name
def _python_pt_or_ct_type_to_proto_type(content_type: str) -> str:
    """
    Convert a PT or CT from python to their protobuf equivalent.

    :param content_type: the python type
    :return: The protobuf equivalent
    """
    # Primitive types map through the lookup table; anything else
    # (i.e. a custom type) passes through unchanged.
    return PYTHON_TYPE_TO_PROTO_TYPE.get(content_type, content_type)
def _includes_custom_type(content_type: str) -> bool:
    """
    Evaluate whether a content type is a custom type or has a custom type as a sub-type.

    :param content_type: the content type
    :return: Boolean result
    """
    if content_type.startswith("Optional"):
        # Optional wraps exactly one sub-type; recurse into it.
        inner = _get_sub_types_of_compositional_types(content_type)[0]
        return _includes_custom_type(inner)
    if content_type.startswith("Union"):
        # A union is custom-typed if any member is.
        return any(
            _includes_custom_type(sub_type)
            for sub_type in _get_sub_types_of_compositional_types(content_type)
        )
    if content_type.startswith(("FrozenSet", "Tuple", "Dict")):
        return False
    # Remaining names: primitives are not custom, everything else is.
    return content_type not in PYTHON_TYPE_TO_PROTO_TYPE
def is_installed(programme: str) -> bool:
    """
    Check whether a programme is installed on the system.

    :param programme: the name of the programme.
    :return: True if installed, False otherwise
    """
    return shutil.which(programme) is not None
def base_protolint_command() -> str:
    """
    Return the base protolint command.

    :return: The base protolint command
    """
    on_windows = sys.platform.startswith("win")
    if on_windows:
        return "protolint"  # pragma: nocover
    # On POSIX systems, extend PATH so a Go-installed protolint is found.
    return "PATH=${PATH}:${GOPATH}/bin/:~/go/bin protolint"
def check_prerequisites() -> None:
    """Check whether a programme is installed on the system."""
    # (programme, error message) pairs for the tools checked via shutil.which.
    formatter_checks = (
        (
            "black",
            "Cannot find black code formatter! To install, please follow this link: https://black.readthedocs.io/en/stable/installation_and_usage.html",
        ),
        (
            "isort",
            "Cannot find isort code formatter! To install, please follow this link: https://pycqa.github.io/isort/#installing-isort",
        ),
    )
    # check black and isort code formatters are installed
    for programme, error_message in formatter_checks:
        if not is_installed(programme):
            raise FileNotFoundError(error_message)
    # check protolint code formatter is installed (probed by running it,
    # since it may only be reachable through the extended PATH)
    if subprocess.call(f"{base_protolint_command()} version", shell=True) != 0:  # nosec
        raise FileNotFoundError(
            "Cannot find protolint protocol buffer schema file linter! To install, please follow this link: https://github.com/yoheimuta/protolint."
        )
    # check protocol buffer compiler is installed
    if not is_installed("protoc"):
        raise FileNotFoundError(
            "Cannot find protocol buffer compiler! To install, please follow this link: https://developers.google.com/protocol-buffers/"
        )
def get_protoc_version() -> str:
    """Get the protoc version used."""
    completed = subprocess.run(  # nosec
        ["protoc", "--version"], stdout=subprocess.PIPE, check=True
    )
    version_line = completed.stdout.decode("utf-8")
    # Strip both Unix and Windows line endings from the captured output.
    return version_line.strip("\n").strip("\r")
def load_protocol_specification(specification_path: str) -> ProtocolSpecification:
    """
    Load a protocol specification.

    :param specification_path: path to the protocol specification yaml file.
    :return: A ProtocolSpecification object
    """
    config_loader = ConfigLoader(
        "protocol-specification_schema.json", ProtocolSpecification
    )
    # BUG FIX: the original passed open_file(...) directly and never closed
    # the handle; the context manager closes it deterministically (the same
    # pattern this module already uses elsewhere with open_file).
    with open_file(specification_path) as specification_file:
        protocol_spec = config_loader.load_protocol_specification(specification_file)
    return protocol_spec
def _create_protocol_file(
    path_to_protocol_package: str, file_name: str, file_content: str
) -> None:
    """
    Create a file in the generated protocol package.

    :param path_to_protocol_package: path to the file
    :param file_name: the name of the file
    :param file_content: the content of the file
    """
    target_path = os.path.join(path_to_protocol_package, file_name)
    with open_file(target_path, "w") as protocol_file:
        protocol_file.write(file_content)
def try_run_black_formatting(path_to_protocol_package: str) -> None:
    """
    Run Black code formatting via subprocess.

    :param path_to_protocol_package: a path where formatting should be applied.
    """
    # Invoke black through the current interpreter so the same environment
    # that runs the generator also formats its output.
    command = [sys.executable, "-m", "black", path_to_protocol_package, "--quiet"]
    subprocess.run(command, check=True)  # nosec
def try_run_isort_formatting(path_to_protocol_package: str) -> None:
    """
    Run Isort code formatting via subprocess.

    :param path_to_protocol_package: a path where formatting should be applied.
    """
    command = [sys.executable, "-m", "isort"]
    # Point isort at the generator's bundled configuration file.
    command.extend(ISORT_CLI_ARGS)
    command.append(path_to_protocol_package)
    subprocess.run(command, check=True)  # nosec
def try_run_protoc(
    path_to_generated_protocol_package: str,
    name: str,
    language: str = PROTOCOL_LANGUAGE_PYTHON,
) -> None:
    """
    Run 'protoc' protocol buffer compiler via subprocess.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :param language: the target language in which to compile the protobuf schema file
    """
    # for closure-styled imports for JS, comment the first line and uncomment the second
    if language == PROTOCOL_LANGUAGE_JS:
        js_commonjs_import_option = "import_style=commonjs,binary:"
    else:
        js_commonjs_import_option = ""
    output_option = (
        f"--{language}_out={js_commonjs_import_option}"
        f"{path_to_generated_protocol_package}"
    )
    command = [
        "protoc",
        f"-I={path_to_generated_protocol_package}",
        output_option,
        f"{path_to_generated_protocol_package}/{name}.proto",
    ]
    subprocess.run(  # nosec
        command,
        stderr=subprocess.PIPE,
        encoding="utf-8",
        check=True,
        env=os.environ.copy(),
    )
def try_run_protolint(path_to_generated_protocol_package: str, name: str) -> None:
    """
    Run 'protolint' linter via subprocess.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :raises subprocess.CalledProcessError: if protolint exits with a non-zero status.
    """
    # path to proto file
    path_to_proto_file = os.path.join(
        path_to_generated_protocol_package,
        f"{name}.proto",
    )
    # Dump protolint configuration into a temporary file
    temp_dir = tempfile.mkdtemp()
    try:
        path_to_configuration_in_tmp_file = Path(
            temp_dir, PROTOLINT_CONFIGURATION_FILE_NAME
        )
        with open_file(path_to_configuration_in_tmp_file, "w") as file:
            file.write(PROTOLINT_CONFIGURATION)
        # Protolint command
        cmd = f'{base_protolint_command()} lint -config_path={path_to_configuration_in_tmp_file} -fix "{path_to_proto_file}"'
        # Execute protolint command
        subprocess.run(  # nosec
            cmd,
            stderr=subprocess.PIPE,
            stdout=subprocess.PIPE,
            encoding="utf-8",
            check=True,
            env=os.environ.copy(),
            shell=True,
        )
    finally:
        # BUG FIX: the original only removed the temporary directory on the
        # success path, leaking it whenever subprocess.run raised
        # CalledProcessError; cleanup now runs unconditionally.
        shutil.rmtree(temp_dir)
def check_protobuf_using_protoc(
    path_to_generated_protocol_package: str, name: str
) -> Tuple[bool, str]:
    """
    Check whether a protocol buffer schema file is valid.

    Validation is via trying to compile the schema file. If successfully compiled it is valid, otherwise invalid.
    If valid, return True and a 'protobuf file is valid' message, otherwise return False and the error thrown by the compiler.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protoc(path_to_generated_protocol_package, name)
    except subprocess.CalledProcessError as e:
        # Strip "file.proto:line:col: " location prefixes from the error.
        location_pattern = name + ".proto:[0-9]+:[0-9]+: "
        return False, re.sub(location_pattern, "", e.stderr[:-1])
    # The check only validates; discard the module protoc generated.
    os.remove(os.path.join(path_to_generated_protocol_package, name + "_pb2.py"))
    return True, "protobuf file is valid"
def compile_protobuf_using_protoc(
    path_to_generated_protocol_package: str, name: str, language: str
) -> Tuple[bool, str]:
    """
    Compile a protocol buffer schema file using protoc.

    If successfully compiled, return True and a success message,
    otherwise return False and the error thrown by the compiler.

    :param path_to_generated_protocol_package: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :param language: the target language in which to compile the protobuf schema file
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protoc(path_to_generated_protocol_package, name, language)
    except subprocess.CalledProcessError as e:
        # Strip "file.proto:line:col: " location prefixes from the error.
        location_pattern = name + ".proto:[0-9]+:[0-9]+: "
        return False, re.sub(location_pattern, "", e.stderr[:-1])
    return True, "protobuf schema successfully compiled"
def apply_protolint(path_to_proto_file: str, name: str) -> Tuple[bool, str]:
    """
    Apply protolint linter to a protocol buffer schema file.

    If no output, return True and a success message,
    otherwise return False and the output shown by the linter
    (minus the indentation suggestions which are automatically fixed by protolint).

    :param path_to_proto_file: path to the protocol buffer schema file.
    :param name: name of the protocol buffer schema file.
    :return: Boolean result and an accompanying message
    """
    try:
        try_run_protolint(path_to_proto_file, name)
        return True, "protolint has no output"
    except subprocess.CalledProcessError as e:
        # Drop whitelisted lines (e.g. indentation issues that -fix repairs).
        filtered_lines = [
            line
            for line in e.stderr.split("\n")
            if not any(
                whitelisted in line for whitelisted in PROTOLINT_ERROR_WHITELIST
            )
        ]
        return False, "\n".join(filtered_lines)
| 35.636678 | 151 | 0.646373 |
bccd22c451ca48a6b2b63fe4d46e6f3d5177271f
| 12,177 |
py
|
Python
|
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/python/foglamp/services/core/api/test_backup_restore.py
|
vaibhav-ScaleDB/FogLAMP
|
445e7a588f5ec5fcae0360b49fdc4e4de0ea2ec8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# FOGLAMP_BEGIN
# See: http://foglamp.readthedocs.io/
# FOGLAMP_END
import os
import asyncio
import json
from unittest.mock import MagicMock, patch
from collections import Counter
from aiohttp import web
import pytest
from foglamp.services.core import routes
from foglamp.services.core import connect
from foglamp.plugins.storage.common.backup import Backup
from foglamp.plugins.storage.common.restore import Restore
from foglamp.plugins.storage.common import exceptions
from foglamp.services.core.api import backup_restore
from foglamp.common.storage_client.storage_client import StorageClientAsync
__author__ = "Vaibhav Singhal"
__copyright__ = "Copyright (c) 2017 OSIsoft, LLC"
__license__ = "Apache 2.0"
__version__ = "${VERSION}"
| 46.834615 | 123 | 0.635214 |
bccd4ecf3e75810f078465ed5395ba34d886f56a
| 3,690 |
py
|
Python
|
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | 6 |
2021-10-21T14:13:25.000Z
|
2021-12-26T12:22:51.000Z
|
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
pyemits/core/preprocessing/dimensional_reduction.py
|
thompson0012/PyEmits
|
9cb6fbf27ca7e8952ed5aca26118055e04492c23
|
[
"Apache-2.0"
] | null | null | null |
"""
Why need dimensional reduction
The following is the use of dimensionality reduction in the data set:
As data dimensions continue to decrease, the space required for data storage will also decrease.
Low-dimensional data helps reduce calculation/training time.
Some algorithms tend to perform poorly on high-dimensional data, and dimensionality reduction can improve algorithm availability.
Dimensionality reduction can solve the problem of multicollinearity by removing redundant features. For example, we have two variables: "On the treadmill for a period of time
Time spent and calorie consumption. These two variables are highly correlated. The longer the time spent on the treadmill, the more calories burned.
Naturally, the more. Therefore, it does not make much sense to store these two data at the same time, just one is enough.
Dimensionality reduction helps data visualization. As mentioned earlier, if the dimensionality of the data is very high, the visualization will become quite difficult, while drawing two-dimensional three-dimensional
The graph of dimensional data is very simple.
Common dimensional reduction techniques:
1. missing value ratio
2. low variance filter
3. high correlation filter
4. random forest
5. backward feature elimination
6. forward feature selection
7. factor analysis
8. principle components analysis
9. independent component analysis
10. IOSMAP
11. t-SNE
12. UMAP
"""
random_state = 0
from enum import Enum
| 30.495868 | 217 | 0.714363 |
bccea585927cd051fb0a3ed4b33c0aada5c1d9b8
| 456 |
py
|
Python
|
sample_project/exam/exam.py
|
pcse/gitlab_tools
|
2896b636b0f8955bdb5f2236e257cc5d3efd54d7
|
[
"BSD-3-Clause"
] | null | null | null |
sample_project/exam/exam.py
|
pcse/gitlab_tools
|
2896b636b0f8955bdb5f2236e257cc5d3efd54d7
|
[
"BSD-3-Clause"
] | null | null | null |
sample_project/exam/exam.py
|
pcse/gitlab_tools
|
2896b636b0f8955bdb5f2236e257cc5d3efd54d7
|
[
"BSD-3-Clause"
] | 1 |
2022-03-17T16:51:08.000Z
|
2022-03-17T16:51:08.000Z
|
"""
These methods can be called inside WebCAT to determine which tests are loaded
for a given section/exam pair. This allows a common WebCAT submission site to
support different project tests
"""
| 25.333333 | 78 | 0.723684 |
bccf9e77bf6eaccd18d5b5a8053e3859146a0272
| 2,727 |
py
|
Python
|
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
scrapy/clarinetear/spiders/pagina12.py
|
ramiror/clarinete
|
4ebf37cf9f705e04e2aad15015be12c48fe25fd3
|
[
"BSD-2-Clause"
] | null | null | null |
from datetime import datetime
import scrapy
import lxml
from lxml.html.clean import Cleaner
import re
SOURCE = 'Pgina 12'
LANGUAGE = 'es'
cleaner = Cleaner(allow_tags=['p', 'br', 'b', 'a', 'strong', 'i', 'em'])
| 32.855422 | 104 | 0.521819 |
bccfcca536c98cf3954ec419341b10079911dafc
| 6,978 |
py
|
Python
|
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
svd.py
|
christyc14/fyp
|
c63e719e383a84eb49ffa0c8bd901bfd4aef5864
|
[
"MIT"
] | null | null | null |
from calendar import c
from typing import Dict, List, Union
from zlib import DEF_BUF_SIZE
import json_lines
import numpy as np
import re
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import pandas as pd
import json
from scipy.sparse.linalg import svds
from scipy.spatial import distance
import os
import streamlit as st
if __name__ == "__main__":
file_path = os.path.dirname(__file__)
if file_path != "":
os.chdir(file_path)
products: List[Dict[str, Union[str, List[str]]]] = []
# input data into List
with open("../cbscraper/product_urls_with_reviews.jsonlines", "rb") as f:
unique = set()
lines = f.read().splitlines()
df_inter = pd.DataFrame(lines)
df_inter.columns = ["json_element"]
df_inter["json_element"].apply(json.loads)
df = pd.json_normalize(df_inter["json_element"].apply(json.loads))
# to save myself if i do something dumb and run the scraper without deleting the .jsonlines file
df.drop_duplicates(subset=["url"], inplace=True)
# option: category of product, eg cleanser
categories = set(df.category.values)
# filter data by given option
print("Hello world!")
print("Welcome!")
print(categories)
print("pls enter the category:")
cat = str(input())
display_product_names = df[df.category == cat]
print(display_product_names[["brand", "product_name"]])
print("pls enter your top 3 products indices, separated by a new line")
item1 = int(input())
item2 = int(input())
item3 = int(input())
print("pls enter # of recs:")
num_recs = int(input())
reviews = display_product_names.explode("review_data")
reviews["username"] = reviews["review_data"].apply(lambda x: x["UserNickname"])
grouped_reviews = reviews.groupby("username")["review_data"].apply(list)
multiple_rating_users = set(grouped_reviews[grouped_reviews.map(len) > 1].index)
print(multiple_rating_users)
print("pls enter sephora userid, if you don't have one just enter 'none':")
username = str(input())
if username == "none":
print("your ingredients based recommendations are:")
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs,
df,
)
print(cbf[["brand", "product_name", "url", "avg_rating"]])
else:
cbf = content_recommender(
cat,
df.product_name.values[item1],
df.product_name.values[item2],
df.product_name.values[item3],
num_recs + 10,
df,
)
cf = collab_recommender(cbf, num_recs, username)
print("your hybrid recommendations are:")
print(cf[["brand", "product_name", "url", "pred_rating"]])
print("thank u for using this service :)")
| 38.131148 | 111 | 0.655775 |
bccff1b3d6077ecdb8e86f1fedd69c5761247393
| 22,448 |
py
|
Python
|
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | null | null | null |
esp32/tools/flasher.py
|
rodgergr/pycom-micropython-sigfox
|
50a31befc40a39b1e4c3513f20da968792227b0e
|
[
"MIT"
] | 1 |
2019-09-22T01:28:52.000Z
|
2019-09-22T01:28:52.000Z
|
#!/usr/bin/env python
#
# Copyright (c) 2018, Pycom Limited.
#
# This software is licensed under the GNU GPL version 3 or any
# later version, with permitted additional terms. For more information
# see the Pycom Licence v1.0 document supplied with this file, or
# available at https://www.pycom.io/opensource/licensing
#
"""
Flash the ESP32 (bootloader, partitions table and factory app).
How to call esptool:
python esptool.py '--chip', 'esp32', '--port', /dev/ttyUSB0, '--baud', '921600', 'write_flash', '-z', '--flash_mode', 'dio', '--flash_freq', '40m', '--flash_size', 'detect', '0x1000', bootloader.bin, '0x8000', partitions.bin, '0x10000', application.bin, '0x3FF000', 'config_no_wifi.bin'
"""
from esptool import ESP32ROM
import os
import sys
import struct
import sqlite3
import argparse
import subprocess
import threading
import time
import fw_version
import csv
working_threads = {}
macs_db = None
wmacs = {}
DB_MAC_UNUSED = 0
DB_MAC_ERROR = -1
DB_MAC_LOCK = -2
DB_MAC_OK = 1
if __name__ == "__main__":
main()
| 42.116323 | 286 | 0.513587 |
bccff8756b8fd9c49c849a5ee7e86c1a5271fe95
| 2,315 |
py
|
Python
|
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
hknweb/events/tests/models/utils.py
|
jyxzhang/hknweb
|
a01ffd8587859bf63c46213be6a0c8b87164a5c2
|
[
"MIT"
] | null | null | null |
import datetime
from django.utils import timezone
from django.contrib.auth.models import User
from hknweb.events.models import Event, EventType, Rsvp
| 28.9375 | 88 | 0.581425 |
bcd088f1e5c34ccfa8be8350d7cb0a6ebc06a38b
| 4,979 |
py
|
Python
|
HealthNet/prescriptions/views.py
|
jimga150/HealthNet
|
84e55302b02221ae6e93640904af837fdfe09a83
|
[
"MIT"
] | null | null | null |
HealthNet/prescriptions/views.py
|
jimga150/HealthNet
|
84e55302b02221ae6e93640904af837fdfe09a83
|
[
"MIT"
] | null | null | null |
HealthNet/prescriptions/views.py
|
jimga150/HealthNet
|
84e55302b02221ae6e93640904af837fdfe09a83
|
[
"MIT"
] | null | null | null |
from django.shortcuts import redirect
from .forms import PrescriptionForm
from core.views import is_doctor, is_nurse, is_admin, is_patient
from core.models import *
from .models import Prescription
from django.contrib.auth.decorators import login_required, user_passes_test
from django.utils import timezone
from django.shortcuts import render
from django.core.urlresolvers import reverse
def not_admin(user):
    """
    :param user: The User in question
    :return: True if the user is anything but an Admin
    """
    user_is_admin = is_admin(user)
    return not user_is_admin
def is_doctor_or_nurse(user):
    """
    :param user: The User in question
    :return: True if the user is a Doctor or Nurse
    """
    # Check nurse status only when the doctor check fails, preserving the
    # original short-circuit behaviour of `is_doctor(user) or is_nurse(user)`.
    result = is_doctor(user)
    if not result:
        result = is_nurse(user)
    return result
def get_prescription_list_for(cpatient):
    """
    Generic getter for a specific patient's prescription list

    :param cpatient: Patient to fetch list for
    :return: context of Prescription list
    """
    patient_prescriptions = Prescription.objects.all().filter(patient=cpatient)
    # Human-readable label for each prescription's time-unit choice.
    time_unit_labels = [
        str(dict(prescription.TIME_CHOICES)[prescription.Time_units])
        for prescription in patient_prescriptions.iterator()
    ]
    return {
        "Labels": ["Doctor", "Drug", "Dosage", "Rate"],
        "Name": str(cpatient),
        "Prescriptions": zip(patient_prescriptions, time_unit_labels),
    }
| 33.193333 | 119 | 0.69753 |
bcd22bd32e41749d160e83a36693fbb03e02a7c0
| 2,232 |
py
|
Python
|
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | 1 |
2020-12-02T13:54:30.000Z
|
2020-12-02T13:54:30.000Z
|
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
algorithms/329. Longest Increasing Path in a Matrix.py
|
woozway/py3-leetcode
|
e51a9ce7a6bb3e35c0fcb8c8f4f6cd5763708dbf
|
[
"MIT"
] | null | null | null |
"""
1. Clarification
2. Possible solutions
- dfs + memoization
- Topological sort
3. Coding
4. Tests
"""
# T=O(m*n), S=O(m*n)
from functools import lru_cache
# T=O(m*n), S=O(m*n)
| 30.162162 | 111 | 0.471774 |
bcd344e1483580a8d86580469eef57c0ac31bfc7
| 1,511 |
py
|
Python
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 898 |
2020-01-09T12:03:08.000Z
|
2022-03-31T07:59:46.000Z
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 172 |
2020-02-21T08:56:42.000Z
|
2021-05-12T03:18:40.000Z
|
cocos2d/tools/coding-style/tailing-spaces.py
|
NIKEA-SOFT/TestGame
|
04f13e5f1324bca9f1e47f02037ea1eddd3bcc8f
|
[
"MIT"
] | 186 |
2020-01-13T09:34:30.000Z
|
2022-03-22T04:48:48.000Z
|
#!/usr/bin/env python
#coding=utf-8
'''
Remove tailing whitespaces and ensures one and only one empty ending line.
'''
import os, re
main()
| 24.370968 | 85 | 0.608868 |
bcd3b0b0dedcabbec5fd0840549ab45783c9eb2d
| 4,096 |
py
|
Python
|
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
three.py/TestPostprocessing-8Bit.py
|
Michael-Pascale/three.py
|
9912f5f850245fb9456a25b6737e12290ae54a2d
|
[
"MIT"
] | null | null | null |
from core import *
from cameras import *
from geometry import *
from material import *
from lights import *
# instantiate and run the program
TestPostprocessing2().run()
| 32 | 99 | 0.577148 |
bcd3c580510f803674768f898ad9016345f92071
| 3,027 |
py
|
Python
|
scripts/test_cache_size_vs_code_balance.py
|
tareqmalas/girih
|
0c126788937d189147be47115703b752235e585c
|
[
"BSD-3-Clause"
] | 7 |
2015-07-14T08:29:14.000Z
|
2021-07-30T14:53:13.000Z
|
scripts/test_cache_size_vs_code_balance.py
|
tareqmalas/girih
|
0c126788937d189147be47115703b752235e585c
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/test_cache_size_vs_code_balance.py
|
tareqmalas/girih
|
0c126788937d189147be47115703b752235e585c
|
[
"BSD-3-Clause"
] | 3 |
2016-08-30T01:25:40.000Z
|
2017-06-22T05:50:05.000Z
|
#!/usr/bin/env python
if __name__ == "__main__":
main()
| 31.863158 | 218 | 0.573835 |
bcd4aa69ca55232166dab3fedc825cb402a37789
| 654 |
py
|
Python
|
generate/lib/run-firefox/firefox_runner.py
|
flamencist/browser-extensions
|
cc2424ce69c718f9f6b1fb0e6cd19759ba384591
|
[
"BSD-3-Clause"
] | 102 |
2015-01-09T22:12:00.000Z
|
2021-04-21T01:18:51.000Z
|
generate/lib/run-firefox/firefox_runner.py
|
flamencist/browser-extensions
|
cc2424ce69c718f9f6b1fb0e6cd19759ba384591
|
[
"BSD-3-Clause"
] | 17 |
2015-01-24T22:30:47.000Z
|
2020-11-19T01:13:32.000Z
|
generate/lib/run-firefox/firefox_runner.py
|
flamencist/browser-extensions
|
cc2424ce69c718f9f6b1fb0e6cd19759ba384591
|
[
"BSD-3-Clause"
] | 33 |
2015-01-15T16:11:15.000Z
|
2021-06-11T12:15:29.000Z
|
import os
import shutil
import codecs
import json
from cuddlefish.runner import run_app
from cuddlefish.rdf import RDFManifest
| 28.434783 | 92 | 0.785933 |
bcd52639c509cc2628a1148eef258524825f4528
| 8,408 |
py
|
Python
|
pyripple/protocol/orderbook.py
|
gip/pyripple
|
d0c696bed7c6ad4c2309733484f9915074f9acdd
|
[
"Apache-2.0"
] | null | null | null |
pyripple/protocol/orderbook.py
|
gip/pyripple
|
d0c696bed7c6ad4c2309733484f9915074f9acdd
|
[
"Apache-2.0"
] | null | null | null |
pyripple/protocol/orderbook.py
|
gip/pyripple
|
d0c696bed7c6ad4c2309733484f9915074f9acdd
|
[
"Apache-2.0"
] | null | null | null |
# PyRipple
#
# Copyright 2015 Gilles Pirio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gilles Pirio <[email protected]>
"""
import numpy as np
import pandas as pd
import mpmath as mp
from mpmath import mpf
import matplotlib
import matplotlib.pyplot as plt
import json
| 37.20354 | 154 | 0.593958 |
bcd61a8f67cde91f10cbb1a9264485fd9ef2e8b8
| 3,205 |
py
|
Python
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 69 |
2020-03-31T06:40:17.000Z
|
2022-02-25T11:48:18.000Z
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 11 |
2019-12-26T17:21:03.000Z
|
2022-03-21T22:17:07.000Z
|
myvenv/lib/python3.6/site-packages/nltk/test/unit/test_senna.py
|
catb0y/twitter_feeling
|
9092a26f2554bbf6b14b33d797abaffa48cda99c
|
[
"MIT"
] | 28 |
2020-04-15T15:24:17.000Z
|
2021-12-26T04:05:02.000Z
|
# -*- coding: utf-8 -*-
"""
Unit tests for Senna
"""
from __future__ import unicode_literals
from os import environ, path, sep
import logging
import unittest
from nltk.classify import Senna
from nltk.tag import SennaTagger, SennaChunkTagger, SennaNERTagger
# Set Senna executable path for tests if it is not specified as an environment variable
if 'SENNA' in environ:
SENNA_EXECUTABLE_PATH = path.normpath(environ['SENNA']) + sep
else:
SENNA_EXECUTABLE_PATH = '/usr/share/senna-v3.0'
senna_is_installed = path.exists(SENNA_EXECUTABLE_PATH)
| 42.733333 | 108 | 0.597504 |
bcd716fdc72869755eef1e517937f6675edfef9d
| 8,191 |
py
|
Python
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 25 |
2015-08-10T19:34:34.000Z
|
2021-02-05T08:28:01.000Z
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 153 |
2015-01-20T08:35:49.000Z
|
2022-03-16T11:00:56.000Z
|
eoxserver/services/opensearch/v11/description.py
|
kalxas/eoxserver
|
8073447d926f3833923bde7b7061e8a1658dee06
|
[
"OML"
] | 10 |
2015-01-23T15:48:30.000Z
|
2021-01-21T15:41:18.000Z
|
#-------------------------------------------------------------------------------
#
# Project: EOxServer <http://eoxserver.org>
# Authors: Fabian Schindler <[email protected]>
#
#-------------------------------------------------------------------------------
# Copyright (C) 2015 EOX IT Services GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies of this Software or works derived from this Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#-------------------------------------------------------------------------------
from itertools import chain
from lxml.builder import ElementMaker
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from django.shortcuts import get_object_or_404
from eoxserver.core.config import get_eoxserver_config
from eoxserver.core.util.xmltools import (
XMLEncoder, NameSpace, NameSpaceMap
)
from eoxserver.resources.coverages import models
from eoxserver.services.opensearch.formats import get_formats
from eoxserver.services.opensearch.extensions import get_extensions
from eoxserver.services.opensearch.config import OpenSearchConfigReader
| 37.746544 | 81 | 0.580393 |
bcd88cb9aee8377371dcb96cf615ef4e2ec10580
| 4,113 |
py
|
Python
|
exercises/level_0/stringing.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 6 |
2019-03-29T06:14:53.000Z
|
2021-10-15T23:42:36.000Z
|
exercises/level_0/stringing.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 4 |
2019-09-06T10:03:40.000Z
|
2022-03-11T23:30:55.000Z
|
exercises/level_0/stringing.py
|
eliranM98/python_course
|
d9431dd6c0f27fca8ca052cc2a821ed0b883136c
|
[
"MIT"
] | 12 |
2019-06-20T19:34:52.000Z
|
2021-10-15T23:42:39.000Z
|
text = '''
Victor Hugo's ({}) tale of injustice, heroism and love follows the fortunes of Jean Valjean, an escaped convict determined to put his criminal past behind him. But his attempts to become a respected member of the community are constantly put under threat: by his own conscience, when, owing to a case of mistaken identity, another man is arrested in his place; and by the relentless investigations of the dogged Inspector Javert. It is not simply for himself that Valjean must stay free, however, for he has sworn to protect the baby daughter of Fantine, driven to prostitution by poverty.
Norman Denny's ({}) lively English translation is accompanied by an introduction discussing Hugo's political and artistic aims in writing Les Miserables.
Victor Hugo (1802-85) wrote volumes of criticism, dramas, satirical verse and political journalism but is best remembered for his novels, especially Notre-Dame de Paris (also known as The Hunchback of Notre-Dame) and Les Miserables, which was adapted into one of the most successful musicals of all time.
'All human life is here'
Cameron Mackintosh, producer of the musical Les Miserables
'One of the half-dozen greatest novels of the world'
Upton Sinclair
'A great writer - inventive, witty, sly, innovatory'
A. S. Byatt, author of Possession
'''
name = 'Victor'
word1 = 'writer'
word2 = 'witty'
numbers = "0123456789"
small_letters = 'abcdefghijklmnopqrstuvwxyz'
big_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
name_index = text.find(name)
name_plus3 = text[name_index: name_index+len(name)+3]
word1_index = text.find(word1, 0, 100)
word2_index = text.find(word2, int(len(text)/2), len(text))
count_characters = text.count('of')
is_text_starts_with_name = text.startswith(name)
is_text_ends_with_name = text.endswith(name)
text = text.format('1822-95', '1807-63')
words = text.split(' ')
text1 = ''.join(words)
text2 = ','.join(words)
text3 = '_'.join(words)
text4 = ' '.join(words)
text5 = text.replace('of', '@')
text6 = text.capitalize()
text7 = text.replace('a', '')
text8 = text.strip()
upper_name = name.upper()
lower_name = name.lower()
is_name_upper = name.isupper()
is_name_lower = name.islower()
is_big_letters_upper = big_letters.isupper()
is_small_letters_lower = small_letters.islower()
stringed_integer = '90'.isnumeric()
stringed_float = '90.5'.isnumeric()
converted_int = int('90')
converted_float = float('90.5')
converted_string = str(183)
is_digit = converted_string[1].isdigit()
edges = small_letters[0] + big_letters[-1]
body = numbers[1:-1]
evens = numbers[::2]
odds = numbers[1::2]
print('name', name)
print('word1', word1)
print('word2', word2)
print('numbers', numbers)
print('small_letters', small_letters)
print('big_letters', big_letters)
print('name_index', name_index)
print('name_plus3', name_plus3)
print('word1_index', word1_index)
print('word2_index', word2_index)
print('count_characters -> \'of\' in the text', count_characters)
print('is_text_starts_with_name', is_text_starts_with_name)
print('is_text_ends_with_name', is_text_ends_with_name)
print('\n\n\n\n\n', 'text', text, '\n\n\n\n\n')
print('\n\n\n\n\n', 'words', words, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text1', text1, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text2', text2, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text3', text3, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text4', text4, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text5', text5, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text6', text6, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text7', text7, '\n\n\n\n\n')
print('\n\n\n\n\n', 'text8', text8, '\n\n\n\n\n')
print('upper_name', upper_name)
print('lower_name', lower_name)
print('is_name_upper', is_name_upper)
print('is_name_lower', is_name_lower)
print('is_big_letters_upper', is_big_letters_upper)
print('is_small_letters_lower', is_small_letters_lower)
print('stringed_integer', stringed_integer)
print('stringed_float', stringed_float)
print('converted_int', converted_int)
print('converted_float', converted_float)
print('converted_string', converted_string)
print('is_digit', is_digit)
print('edges', edges)
print('body', body)
print('evens', evens)
print('odds', odds)
| 41.545455 | 590 | 0.735959 |
bcda0fb17ff31d81f09ba63207547e8568fa2ae6
| 2,085 |
py
|
Python
|
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
lab1/text_recognizer/models/mlp.py
|
Agyey/fsdl-text-recognizer-2021-labs
|
4bd85042ab9f6decd78849bb655c197cc13ffc11
|
[
"MIT"
] | null | null | null |
from typing import Any, Dict
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
FC1_DIM = 1024
FC2_DIM = 128
| 29.366197 | 95 | 0.533813 |
bcda1861cc6349c05142c05367f155b32d44ad1c
| 979 |
py
|
Python
|
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | null | null | null |
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | 2 |
2019-10-05T14:20:11.000Z
|
2019-10-05T14:22:31.000Z
|
frontend/widgets/button.py
|
AzoeDesarrollos/PyMavisDatabase
|
bfcd0557f63a4d8a73f0f8e891c47b47a1de1b45
|
[
"MIT"
] | null | null | null |
from pygame import Surface, font
from .basewidget import BaseWidget
from frontend import Renderer, WidgetHandler
| 27.971429 | 74 | 0.592441 |
bcda32ab85ecef62e60d41fc5f944271b774ca47
| 709 |
py
|
Python
|
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | null | null | null |
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | null | null | null |
tensorflow_rnn/mnist_lstm.py
|
naoki009/samples
|
dac3bbddbd06374c39768cbe17fefd0110fe316f
|
[
"BSD-2-Clause"
] | 1 |
2020-08-14T11:44:42.000Z
|
2020-08-14T11:44:42.000Z
|
import numpy as np
import tensorflow as tf
"""
Do an MNIST classification line by line by LSTM
"""
(x_train, y_train), \
(x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train/255.0, x_test/255.0
model = tf.keras.Sequential()
model.add(tf.keras.layers.LSTM(128, input_shape=(None, 28)))
#model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Activation("softmax"))
model.summary()
model.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer="sgd",
metrics=["accuracy"])
model.fit(x_train, y_train, validation_data=(x_test, y_test),
batch_size=100, epochs=100)
| 27.269231 | 67 | 0.70945 |
bcdd9f6e351b12352ead172914df612d99371de2
| 984 |
py
|
Python
|
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
scrap/CloudCoverUndersampling.py
|
cseale/kaggle-amazon-rainforests
|
cf42941bb3c70ba19257764b66fe33550be88e0b
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import os
from random import shuffle
from tqdm import tqdm
DATA_DIR = '../input/amazon/'
TRAIN_TIF_DIR = DATA_DIR + 'train-tif/'
TRAIN_CSV = DATA_DIR + 'train.csv'
TEST_TIF_DIR = DATA_DIR + 'test-tif/'
IMG_SIZE = 100
LR = 1e-3
MODEL_NAME = 'amazon=-{}-{}.model'.format(LR, '2conv-basic')
CLOUD_COVER_LABELS = [
'clear',
'cloudy',
'haze',
'partly_cloudy']
# read our data and take a look at what we are dealing with
train_csv = pd.read_csv(TRAIN_CSV)
train_csv.head()
tags = pd.DataFrame()
for label in CLOUD_COVER_LABELS:
tags[label] = train_csv.tags.apply(lambda x: np.where(label in x, 1, 0))
train_csv = pd.concat([train_csv, tags], axis=1)
# In[17]:
pd.concat([train_csv[train_csv.clear == 1].sample(n=7251),
train_csv[train_csv.cloudy == 1].sample(n=7251),
train_csv[train_csv.haze == 1],
train_csv[train_csv.partly_cloudy == 1].sample(n=7251)], axis=0, ignore_index=True)
| 20.93617 | 83 | 0.690041 |
bcddbefe85e0c400583bdfd288157408fcf8f518
| 11,271 |
py
|
Python
|
rpython/translator/platform/posix.py
|
wdv4758h/mu-client-pypy
|
d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
rpython/translator/platform/posix.py
|
wdv4758h/mu-client-pypy
|
d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
rpython/translator/platform/posix.py
|
wdv4758h/mu-client-pypy
|
d2fcc01f0b4fe3ffa232762124e3e6d38ed3a0cf
|
[
"Apache-2.0",
"OpenSSL"
] | null | null | null |
"""Base support for POSIX-like platforms."""
import py, os, sys
from rpython.translator.platform import Platform, log, _run_subprocess
import rpython
rpydir = str(py.path.local(rpython.__file__).join('..'))
| 36.009585 | 130 | 0.53021 |
bcde4233b8d9a36e066c7f656e904c7a4e46422b
| 3,247 |
py
|
Python
|
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 16 |
2018-02-06T14:43:41.000Z
|
2021-01-23T05:07:33.000Z
|
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | null | null | null |
chintai-scrape/A001_parse_htmls.py
|
GINK03/itmedia-scraping
|
5afbe06dd0aa12db1694a2b387aa2eeafb20e981
|
[
"MIT"
] | 4 |
2018-01-16T13:50:43.000Z
|
2019-12-16T19:45:54.000Z
|
import glob
import bs4
import gzip
import pickle
import re
import os
from concurrent.futures import ProcessPoolExecutor as PPE
import json
from pathlib import Path
from hashlib import sha256
import shutil
Path('json').mkdir(exist_ok=True)
#urls = [sha256(bytes(v, 'utf8')).hexdigest() for v in json.load(fp=open('./hash_url.json')).values()]
#fns = [f'./htmls/{url}' for url in urls]
import random
files = glob.glob('./htmls/*')
random.shuffle(files)
args = {}
for index, fn in enumerate(files):
key = index%8
if args.get(key) is None:
args[key] = []
args[key].append(fn)
args = [(key,fns) for key,fns in args.items()]
#[pmap(arg) for arg in args]
with PPE(max_workers=8) as exe:
exe.map(pmap, args)
| 36.077778 | 158 | 0.55559 |
bcde81a6deec0252f40277dde895c56c9a4836eb
| 5,047 |
py
|
Python
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
ricardolsmendes/datacatalog-connectors-hive
|
9e71588133c0b0227e789c8d6bb26cfa031d2cfb
|
[
"Apache-2.0"
] | 19 |
2020-04-27T21:55:47.000Z
|
2022-03-22T19:45:14.000Z
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
ricardolsmendes/datacatalog-connectors-hive
|
9e71588133c0b0227e789c8d6bb26cfa031d2cfb
|
[
"Apache-2.0"
] | 12 |
2020-05-28T14:48:29.000Z
|
2022-01-15T17:52:09.000Z
|
google-datacatalog-apache-atlas-connector/src/google/datacatalog_connectors/apache_atlas/scrape/metadata_scraper.py
|
mesmacosta/datacatalog-connectors-hive
|
ab7e49fbef8599dd9053c2260b261ce01f510a47
|
[
"Apache-2.0"
] | 15 |
2020-05-03T17:25:51.000Z
|
2022-01-11T22:10:35.000Z
|
#!/usr/bin/python
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from google.datacatalog_connectors.apache_atlas import scrape
| 37.385185 | 79 | 0.656628 |
bcded4531d60ca947d6fb59affac50e25540dcfc
| 7,490 |
py
|
Python
|
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
aviary/roost/data.py
|
sxie22/aviary
|
74b87eee86067f69af6e5b86bd12fca2202c4de5
|
[
"MIT"
] | null | null | null |
import functools
import json
from os.path import abspath, dirname, exists, join
from typing import Dict, Sequence
import numpy as np
import pandas as pd
import torch
from pymatgen.core import Composition
from torch.utils.data import Dataset
def collate_batch(dataset_list):
"""
Collate a list of data and return a batch for predicting crystal
properties.
Parameters
----------
dataset_list: list of tuples for each data point.
(atom_fea, nbr_fea, nbr_fea_idx, target)
atom_fea: torch.Tensor shape (n_i, atom_fea_len)
nbr_fea: torch.Tensor shape (n_i, M, nbr_fea_len)
self_fea_idx: torch.LongTensor shape (n_i, M)
nbr_fea_idx: torch.LongTensor shape (n_i, M)
target: torch.Tensor shape (1, )
cif_id: str or int
Returns
-------
N = sum(n_i); N0 = sum(i)
batch_atom_weights: torch.Tensor shape (N, 1)
batch_atom_fea: torch.Tensor shape (N, orig_atom_fea_len)
Atom features from atom type
batch_self_fea_idx: torch.LongTensor shape (N, M)
Indices of mapping atom to copies of itself
batch_nbr_fea_idx: torch.LongTensor shape (N, M)
Indices of M neighbors of each atom
crystal_atom_idx: list of torch.LongTensor of length N0
Mapping from the crystal idx to atom idx
target: torch.Tensor shape (N, 1)
Target value for prediction
batch_comps: list
batch_ids: list
"""
# define the lists
batch_atom_weights = []
batch_atom_fea = []
batch_self_fea_idx = []
batch_nbr_fea_idx = []
crystal_atom_idx = []
batch_targets = []
batch_cry_ids = []
cry_base_idx = 0
for i, (inputs, target, *cry_ids) in enumerate(dataset_list):
atom_weights, atom_fea, self_fea_idx, nbr_fea_idx = inputs
# number of atoms for this crystal
n_i = atom_fea.shape[0]
# batch the features together
batch_atom_weights.append(atom_weights)
batch_atom_fea.append(atom_fea)
# mappings from bonds to atoms
batch_self_fea_idx.append(self_fea_idx + cry_base_idx)
batch_nbr_fea_idx.append(nbr_fea_idx + cry_base_idx)
# mapping from atoms to crystals
crystal_atom_idx.append(torch.tensor([i] * n_i))
# batch the targets and ids
batch_targets.append(target)
batch_cry_ids.append(cry_ids)
# increment the id counter
cry_base_idx += n_i
return (
(
torch.cat(batch_atom_weights, dim=0),
torch.cat(batch_atom_fea, dim=0),
torch.cat(batch_self_fea_idx, dim=0),
torch.cat(batch_nbr_fea_idx, dim=0),
torch.cat(crystal_atom_idx),
),
tuple(torch.stack(b_target, dim=0) for b_target in zip(*batch_targets)),
*zip(*batch_cry_ids),
)
| 33.738739 | 100 | 0.607076 |
bcdf1f594847bcd658c78df9bc4bf018e0d729b0
| 201 |
py
|
Python
|
tests/test_util.py
|
danqing/dqpy
|
f296341adb0dbbfb361eaf8b815b0ffd189ebf58
|
[
"MIT"
] | null | null | null |
tests/test_util.py
|
danqing/dqpy
|
f296341adb0dbbfb361eaf8b815b0ffd189ebf58
|
[
"MIT"
] | 25 |
2018-05-22T15:59:37.000Z
|
2020-02-14T08:08:24.000Z
|
tests/test_util.py
|
danqing/dqpy
|
f296341adb0dbbfb361eaf8b815b0ffd189ebf58
|
[
"MIT"
] | null | null | null |
import unittest
from dq import util
| 18.272727 | 50 | 0.656716 |
bce0bfd9222f594d713d4743ed32c26bb4279c4c
| 1,483 |
py
|
Python
|
check_perm.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
check_perm.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
check_perm.py
|
codecakes/random_games
|
1e670021ec97a196726e937e658878dc63ba9d34
|
[
"MIT"
] | null | null | null |
"""
PermCheck
Check whether array A is a permutation.
https://codility.com/demo/results/demoANZ7M2-GFU/
Task description
A non-empty zero-indexed array A consisting of N integers is given.
A permutation is a sequence containing each element from 1 to N once, and only once.
For example, array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
is a permutation, but array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
is not a permutation, because value 2 is missing.
The goal is to check whether array A is a permutation.
Write a function:
def solution(A)
that, given a zero-indexed array A, returns 1 if array A is a permutation and 0 if it is not.
For example, given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
A[3] = 2
the function should return 1.
Given array A such that:
A[0] = 4
A[1] = 1
A[2] = 3
the function should return 0.
Assume that:
N is an integer within the range [1..100,000];
each element of array A is an integer within the range [1..1,000,000,000].
Complexity:
expected worst-case time complexity is O(N);
expected worst-case space complexity is O(N), beyond input storage (not counting the storage required for input arguments).
Elements of input arrays can be modified.
"""
| 27.981132 | 123 | 0.662171 |
bce184e301b2a2454689f53c630fba1c6046cd36
| 18,709 |
py
|
Python
|
src/oci/log_analytics/models/log_analytics_association.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249 |
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/log_analytics/models/log_analytics_association.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228 |
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/log_analytics/models/log_analytics_association.py
|
Manny27nyc/oci-python-sdk
|
de60b04e07a99826254f7255e992f41772902df7
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224 |
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
| 31.443697 | 245 | 0.65685 |
bce1c979a2eb7695c7ea999525e47a17d52983b8
| 68,069 |
py
|
Python
|
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | 3 |
2021-07-28T11:28:25.000Z
|
2021-07-28T11:56:58.000Z
|
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | null | null | null |
symblic_game/NEW_GAME.py
|
zishanqin/Symbolic-transfer
|
b553f188ad3f6c6492fcff556ac6f597e56cf43e
|
[
"MIT"
] | 1 |
2021-07-28T11:40:45.000Z
|
2021-07-28T11:40:45.000Z
|
'Author: Aimore Resende Riquetti Dutra'
'''email: [email protected]'''
# -------------------------------------------------------------------------------------------------- #
# This code can run 4 different models of Reinforcement Learning:
# Q-Learning (QL), DQN, SRL (DSRL), SRL+CS(DSRL_object_near) and some other variations of SRL
# The setting for each run can be set at the end of the code
# It can load and save the models in Excel form
# There are some pre-defined environments, but you can create your own
# Press G to get intermediate Graphs and P to stop
# -------------------------------------------------------------------------------------------------- #
import Class
import pprint
import random
import sys
import numpy as np
import pygame
# from pyglet import clock
import pandas as pd
import time
import json
from time import sleep
import math
import matplotlib.pyplot as plt
import os
import glob
## Comment this part if not using DQN model:
# import keras
# from keras.models import Sequential
# from keras.layers import Dense, Activation, Flatten
# from keras.models import model_from_json
# from keras.optimizers import sgd
# from keras.utils import plot_model
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.3
# set_session(tf.Session(config=config))
# ------ environments ------
# region COLOR DEFINITION
explore_set = set()
explore_dict = dict()
white = (255, 255, 255)
black = (0, 0, 0)
grey = (80, 80, 80)
red = (255, 0, 0)
blue = (0, 0, 255)
green = (0, 255, 0)
yellow = (250, 250, 0)
pink = (250, 105, 180)
# endregion
# region PANDAS DEFINITION
pd.set_option('display.max_columns', None)
pd.set_option('display.large_repr', 'info')
desired_width = 180
pd.set_option('display.width', desired_width)
pd.set_option('precision', 4)
# endregion
np.random.seed(123) # For reproducibility
pygame.init() # Pygame initialialization
pp = pprint.PrettyPrinter(indent=4)
actions = ['up', 'down', 'right', 'left']
actions_dict = {'up':0, 'down':1, 'right':2, 'left':3}
p_keys = [pygame.K_w, pygame.K_a, pygame.K_s, pygame.K_d]
# clock.tick(20)
def pop(self):
'''Removes a layer instance on top of the layer stack.
'''
while self.outputs:
self.layers.pop()
if not self.layers:
self.outputs = []
self.inbound_nodes = []
self.outbound_nodes = []
else:
self.layers[-1].outbound_nodes = []
self.outputs = [self.layers[-1].output]
self.built = False
# region REWARDS
negative_reward = 5 # Negative Reward
positive_reward = 1 # Positive Reward
step_reward = 0 # Reward received by each step
# endregion
# ------ environments configuration (till line 640) ------
# region TEXT FONTS DEFINITION
smallfont = pygame.font.SysFont('comicsansms', 13)
smallfont_act = pygame.font.SysFont('arial', 13)
mediumfont_act = pygame.font.SysFont('arial', 18, bold=True)
pygame.font.init()
# endregion
# region DISPLAY FUNCTIONS
# endregion
# region CREATE OBJ_LIST FROM STATE AND RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS
''' CREATE obj_list - FROM env '''
''' CREATE A RELATIONSHIP LIST BETWEEN AGENT AND OBJECTS - FROM obj_list '''
# endregion
# region DRAW OBJECTS
x_zero_screen = 50
y_zero_screen = 180
size_obj = 37
# endregion
# region CREATE THE STATE FROM THE ENVIRONMENT
# endregion
# region ENVIRONMENT CONFIGURATION
# endregion
# region SAVE - LOAD - CREATE
# endregion
# ------ RL algorithms (till line 1030) ------
# region DQN - CONFIGURATIONS
#
alfa = 1 # Learning Rate
gamma = 0.9 # Temporal Discount Factor
''' PROGRAM START '''
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
# -------------------------------------------------------------------------------------------------- #
''' SELECT PARAMETERS TO RUN THE SOFTWARE '''
# environment configuration
Env = 11
Alg_list = ["QL",
"DSRL",
"DSRL_object_near",
"DQN",
"DSRL_dist",
"DSRL_dist_type",
"DSRL_dist_type_near",
"DSRL_dist_type_near_propNeg",
"DSRL_object"]
Alg = Alg_list[2] # Select the algorithm to be used
Learn = False # To update its knowledge
Load = True # To load a learned model
Load_path = "/Results/Train/Env_11/Train_Env_11_DSRL 02 41 20 05-05-21"
# algorithm configuration
Samples = 2 # Usually 10 samples (repeat 100 episodes for 10 times)
Print = True # Print some info in the terminal
Auto = True # Agent moves Automatic or if False it moves by pressing the Spacebar key
Server = False # If running in the server since
# change Prob to 1 for probe training??
Prob = 0.3 # Probability to make a random move (exploration rate)
Cond_to_end = "max_steps" # Choose from below (there are 4)
Save = False # Save the model
speed = 0.05 # seconds per frame
# Cond_to_end = "max_steps"
# Cond_to_end = "coll_all"
# Cond_to_end = "only_negative"
Episodes = 500 # Usually 1000 or 100
# region DQN Model Configurations:
# max_memory_list = [5, 5, 5, 30, 30, 30, 100, 100, 100]
# hidden_size_list = [5, 30, 270, 5, 30, 270, 5, 30, 270]
# batch_size_list = [1, 1, 1, 10, 10, 10, 32, 32, 32]
max_memory_list = [100, 100, 100, 300, 300, 300, 900, 900, 900]
hidden_size_list = [5, 10, 15, 5, 10, 15, 5, 10, 15]
batch_size_list = [32, 32, 32, 32, 32, 32, 32, 32, 32]
optimizer_list = ["adam", "rms_opt"]
n_actions = 4 # [move_up, move_down, move_left, move_right]
# endregion
Net_comb_param = 4
# ------------------------------------------------------------------------------------------- #
run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
# ------------------------------------------------------------------------------------------- #
''' REPEAT DQN Net_Comb_Param '''
# for i in range(9):
# Net_comb_param = i
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' REPEAT Alg for a list of Env '''
# env_list = [2,3]
# for Env in env_list:
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
''' Alg_list for Env_list '''
# env_list = [2,3]
# alg_list = ["QL", "DSRL", "DSRL_object_near", "DQN"]
# for Env in env_list:
# for Alg in alg_list:
# run(Env, Alg, Learn, Load, Print, Auto, Episodes, Cond_to_end, Server, Net_comb_param, Load_path, Prob, Samples, Save)
| 41.734519 | 231 | 0.491707 |
bce207bdb62870e146b1e56a1b168c691c0515ac
| 4,273 |
py
|
Python
|
utils/scene_bounding_box.py
|
davidemarelli/sfm_flow
|
7a96d8309cc01b8499347ba0cae882923d82bbcc
|
[
"MIT"
] | 8 |
2020-10-27T12:52:17.000Z
|
2022-03-30T04:15:37.000Z
|
utils/scene_bounding_box.py
|
ElsevierSoftwareX/SOFTX_2020_51
|
b240a113c91405fac60444a6e56e87e3cf17a27b
|
[
"MIT"
] | 1 |
2020-11-09T01:56:04.000Z
|
2020-11-24T15:58:26.000Z
|
utils/scene_bounding_box.py
|
davidemarelli/sfm_flow
|
7a96d8309cc01b8499347ba0cae882923d82bbcc
|
[
"MIT"
] | 2 |
2021-12-02T10:04:39.000Z
|
2022-03-28T07:54:07.000Z
|
import logging
from typing import Tuple
import bpy
from mathutils import Vector
from .object import get_objs
logger = logging.getLogger(__name__)
################################################################################################
# Methods
#
# ==============================================================================================
def compute(self):
    """Recompute the scene's axis-aligned bounding box from its mesh objects.

    Updates the per-axis min/max attributes and, when at least one object
    is found, the box ``center``.
    """
    objs = get_objs(self.scene, exclude_collections=self.exclude_collections, mesh_only=True)
    logger.debug("Found %i objects in scene %s", len(objs), self.scene.name)
    for obj in objs:
        # Visit all 8 corners of the object's local bounding box, moved
        # into world space through the object's transform.
        for corner in obj.bound_box:
            world_pt = obj.matrix_world @ Vector(corner)
            self.x_min = min(self.x_min, world_pt[0])
            self.x_max = max(self.x_max, world_pt[0])
            self.y_min = min(self.y_min, world_pt[1])
            self.y_max = max(self.y_max, world_pt[1])
            self.z_min = min(self.z_min, world_pt[2])
            self.z_max = max(self.z_max, world_pt[2])
    if objs:
        # Box center is the midpoint of the extents on each axis.
        self.center = Vector(((self.x_max + self.x_min) / 2,
                              (self.y_max + self.y_min) / 2,
                              (self.z_max + self.z_min) / 2))
    logger.debug(str(self))
# ==============================================================================================
def get_min_vector(self):
    """Return the bounding box's minimum corner as a vector."""
    corner = (self.x_min, self.y_min, self.z_min)
    return Vector(corner)
# ==============================================================================================
def get_max_vector(self):
    """Return the bounding box's maximum corner as a vector."""
    corner = (self.x_max, self.y_max, self.z_max)
    return Vector(corner)
################################################################################################
# Builtin methods
#
# ==============================================================================================
| 39.934579 | 113 | 0.388018 |
bce25f2b08abacab5318cf6e45474c91216d772e
| 38,887 |
py
|
Python
|
tensor2tensor/trax/rlax/ppo.py
|
funtion/tensor2tensor
|
339295a276c4bfc93894c474979d0620d14b9710
|
[
"Apache-2.0"
] | 1 |
2020-09-22T02:07:16.000Z
|
2020-09-22T02:07:16.000Z
|
tensor2tensor/trax/rlax/ppo.py
|
joeyism/tensor2tensor
|
2f0edae221a9ec2a415dbf7fcc3ff25b8777d830
|
[
"Apache-2.0"
] | null | null | null |
tensor2tensor/trax/rlax/ppo.py
|
joeyism/tensor2tensor
|
2f0edae221a9ec2a415dbf7fcc3ff25b8777d830
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PPO in JAX.
Notation:
B, scalar - batch size
T, scalar - number of time-steps in a trajectory, or the value of the padded
time-step dimension.
OBS, tuple - shape of a singular observation from the environment.
Ex: For CartPole-v0 this is (4,) and Pong-v0 it's (210, 160, 3)
A, scalar - Number of actions, assuming a discrete space.
Policy and Value function signatures:
Policy Function :: [B, T] + OBS -> [B, T, A]
Value Function :: [B, T] + OBS -> [B, T, 1]
Policy and Value Function :: [B, T] + OBS -> ([B, T, A], [B, T, 1])
i.e. the policy net should take a batch of *trajectories* and at each time-step
in each batch deliver a probability distribution over actions.
NOTE: It doesn't return logits, rather the expectation is that it returns
log-probabilities instead.
NOTE: The policy and value functions need to take care to not take into account
future time-steps while deciding the actions (or value) for the current
time-step.
Policy and Value Function produces a tuple of the expected output of a policy
function and a value function.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
import time
from absl import logging
import gym
from jax import grad
from jax import jit
from jax import lax
from jax import numpy as np
from jax import random as jax_random
import numpy as onp
from tensor2tensor.envs import env_problem
from tensor2tensor.envs import env_problem_utils
from tensor2tensor.trax import jaxboard
from tensor2tensor.trax import layers
from tensor2tensor.trax import optimizers as trax_opt
from tensor2tensor.trax import trax
from tensorflow.io import gfile
DEBUG_LOGGING = False
GAMMA = 0.99
LAMBDA = 0.95
EPSILON = 0.1
EPOCHS = 50 # 100
NUM_OPTIMIZER_STEPS = 100
PRINT_EVERY_OPTIMIZER_STEP = 20
BATCH_TRAJECTORIES = 32
def policy_and_value_net(rng_key,
                         batch_observations_shape,
                         num_actions,
                         bottom_layers_fn=None,
                         two_towers=True):
  """Builds and initializes a combined policy-and-value network.

  Args:
    rng_key: jax rng key used to initialize the parameters.
    batch_observations_shape: shape of a batch of observations, [B, T] + OBS.
    num_actions: int, number of discrete actions.
    bottom_layers_fn: callable returning a fresh list of shared bottom layers,
      or None for no bottom layers.
    two_towers: bool; if True the policy and value heads each get their own
      copy of the bottom layers, otherwise one shared trunk feeds both heads.

  Returns:
    A pair (initialized parameters, network) where the network maps
    observations to (log-probabilities over actions, value predictions).
  """

  def bottom():
    # A fresh stack of bottom layers each time (or none at all).
    return [] if bottom_layers_fn is None else bottom_layers_fn()

  # NOTE: LogSoftmax instead of Softmax for numerical stability.
  if two_towers:
    policy_tower = bottom() + [layers.Dense(num_actions), layers.LogSoftmax()]
    value_tower = bottom() + [layers.Dense(1)]
    net = layers.Branch(
        layers.Serial(*policy_tower),
        layers.Serial(*value_tower),
    )
  else:
    shared = bottom()
    shared.append(
        layers.Branch(
            layers.Serial(layers.Dense(num_actions), layers.LogSoftmax()),
            layers.Dense(1)))
    net = layers.Serial(*shared)
  return net.initialize(batch_observations_shape, rng_key), net
# Should this be collect 'n' trajectories, or
# Run the env for 'n' steps and take completed trajectories, or
# Any other option?
# TODO(afrozm): Replace this with EnvProblem?
def collect_trajectories(env,
                         policy_fun,
                         num_trajectories=1,
                         policy=env_problem_utils.CATEGORICAL_SAMPLING,
                         max_timestep=None,
                         boundary=20,
                         epsilon=0.1,
                         reset=True,
                         rng=None):
  """Collect trajectories from `env` using the given policy function.

  Args:
    env: an EnvProblem environment (batched).
    policy_fun: observations(B, T+1) -> log-probabs(B, T+1, A) callable.
    num_trajectories: int, how many trajectories to collect.
    policy: string, one of "greedy", "epsilon-greedy" or
      "categorical-sampling"; how actions are drawn from `policy_fun`.
    max_timestep: int or None; if set, trajectories are cut off at this
      time-step rather than waiting for the env to signal done.
    boundary: int, padding boundary used by EnvProblem envs.
    epsilon: float, exploration rate for the "epsilon-greedy" policy.
    reset: bool, whether to reset the envs before collecting.
    rng: jax rng, splittable.

  Returns:
    A tuple (trajectories, num_done) where trajectories is a list of
    (observation, action, reward) numpy-array triples, with shapes
    (B, T_i + 1), (B, T_i) and (B, T_i) respectively for element `i`.
  """
  assert isinstance(env, env_problem.EnvProblem)
  # This is an EnvProblem; its batched play routine does the actual work.
  collect_kwargs = dict(
      num_trajectories=num_trajectories,
      max_timestep=max_timestep,
      boundary=boundary,
      policy_sampling=policy,
      eps=epsilon,
      reset=reset,
      rng=rng)
  return env_problem_utils.play_env_problem_with_policy(
      env, policy_fun, **collect_kwargs)
# This function can probably be simplified, ask how?
# Can we do something much simpler than lax.pad, maybe np.pad?
# Others?
def get_padding_value(dtype):
  """Returns the scalar padding value to use for arrays of `dtype`."""
  # Unsigned 8/16-bit ints pad with a scalar of the exact same numpy type;
  # floats pad with a python float, everything else with a python int.
  if dtype == np.uint8:
    return np.uint8(0)
  if dtype == np.uint16:
    return np.uint16(0)
  if dtype == np.float32 or dtype == np.float64:
    return 0.0
  return 0
# TODO(afrozm): Use np.pad instead and make jittable?
# TODO(afrozm): Use np.pad instead and make jittable?
def pad_trajectories(trajectories, boundary=20):
  """Pad trajectories to a bucket length that is a multiple of boundary.

  Args:
    trajectories: list[(observation, actions, rewards)], where each observation
      is shaped (t+1,) + OBS and actions & rewards are shaped (t,), with the
      length of the list being B (batch size).
    boundary: int, bucket length, the actions and rewards are padded to integer
      multiples of boundary.

  Returns:
    tuple: (padding lengths, reward_mask, padded_observations, padded_actions,
      padded_rewards) where padded_observations is shaped (B, T+1) + OBS and
      padded_actions, padded_rewards & reward_mask are shaped (B, T).
      Where T is max(t) rounded up to an integer multiple of boundary.
      padded_length is how much padding we've added and
      reward_mask is 1s for actual rewards and 0s for the padding.
  """
  # Let's compute max(t) over all trajectories.
  t_max = max(r.shape[0] for (_, _, r) in trajectories)
  # t_max is rounded to the next multiple of `boundary`
  boundary = int(boundary)
  bucket_length = boundary * int(np.ceil(float(t_max) / boundary))
  # So all obs will be padded to t_max + 1 and actions and rewards to t_max.
  padded_observations = []
  padded_actions = []
  padded_rewards = []
  padded_lengths = []
  reward_masks = []
  for (o, a, r) in trajectories:
    # Determine the amount to pad, this holds true for obs, actions and rewards.
    num_to_pad = bucket_length + 1 - o.shape[0]
    padded_lengths.append(num_to_pad)
    if num_to_pad == 0:
      # Already at bucket length: pass through unchanged, mask is all ones.
      padded_observations.append(o)
      padded_actions.append(a)
      padded_rewards.append(r)
      reward_masks.append(onp.ones_like(r, dtype=np.int32))
      continue
    # First pad observations.
    # lax.pad config is (low, high, interior) per dimension: pad only at the
    # high end of the leading (time) dimension, leaving OBS dims untouched.
    padding_config = [(0, num_to_pad, 0)]
    for _ in range(o.ndim - 1):
      padding_config.append((0, 0, 0))
    padding_config = tuple(padding_config)
    padding_value = get_padding_value(o.dtype)
    action_padding_value = get_padding_value(a.dtype)
    reward_padding_value = get_padding_value(r.dtype)
    padded_obs = lax.pad(o, padding_value, padding_config)
    padded_observations.append(padded_obs)
    # Now pad actions and rewards.
    assert a.ndim == 1 and r.ndim == 1
    padding_config = ((0, num_to_pad, 0),)
    padded_action = lax.pad(a, action_padding_value, padding_config)
    padded_actions.append(padded_action)
    padded_reward = lax.pad(r, reward_padding_value, padding_config)
    padded_rewards.append(padded_reward)
    # Also create the mask to use later.
    reward_mask = onp.ones_like(r, dtype=np.int32)
    reward_masks.append(lax.pad(reward_mask, 0, padding_config))
  return padded_lengths, np.stack(reward_masks), np.stack(
      padded_observations), np.stack(padded_actions), np.stack(padded_rewards)
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def rewards_to_go(rewards, mask, gamma=0.99):
r"""Computes rewards to go.
Reward to go is defined as follows, the discounted reward that we have to
yet collect, going forward from this point, i.e.:
r2g_t = \sum_{l=0}^{\infty} (\gamma^{l} * reward_{t+l})
Args:
rewards: np.ndarray of shape (B, T) of rewards.
mask: np.ndarray of shape (B, T) of mask for the rewards.
gamma: float, discount factor.
Returns:
rewards to go, np.ndarray of shape (B, T).
"""
B, T = rewards.shape # pylint: disable=invalid-name,unused-variable
masked_rewards = rewards * mask # (B, T)
# We use the following recurrence relation, derived from the equation above:
#
# r2g[t+1] = (r2g[t] - r[t]) / gamma
#
# This means we'll need to calculate r2g[0] first and then r2g[1] and so on ..
#
# **However** this leads to overflows for long sequences: r2g[t] - r[t] > 0
# and gamma < 1.0, so the division keeps increasing.
#
# So we just run the recurrence in reverse, i.e.
#
# r2g[t] = r[t] + (gamma*r2g[t+1])
#
# This is much better, but might have lost updates since the (small) rewards
# at earlier time-steps may get added to a (very?) large sum.
# Compute r2g_{T-1} at the start and then compute backwards in time.
r2gs = [masked_rewards[:, -1]]
# Go from T-2 down to 0.
for t in reversed(range(T - 1)):
r2gs.append(masked_rewards[:, t] + (gamma * r2gs[-1]))
# The list should have length T.
assert T == len(r2gs)
# First we stack them in the correct way to make it (B, T), but these are
# still from newest (T-1) to oldest (0), so then we flip it on time axis.
return np.flip(np.stack(r2gs, axis=1), axis=1)
# TODO(afrozm): JAX-ify this, this is too slow for pong.
def deltas(predicted_values, rewards, mask, gamma=0.99):
r"""Computes TD-residuals from V(s) and rewards.
Where a `delta`, i.e. a td-residual is defined as:
delta_{b,t} = r_{b,t} + \gamma * v_{b,t+1} - v_{b,t}.
Args:
predicted_values: ndarray of shape (B, T+1). NOTE: Expects axis 2 was
squeezed. These represent V(s_bt) for b < B and t < T+1
rewards: ndarray of shape (B, T) of rewards.
mask: ndarray of shape (B, T) of mask for rewards.
gamma: float, discount factor.
Returns:
ndarray of shape (B, T) of one-step TD-residuals.
"""
# `d`s are basically one-step TD residuals.
d = []
_, T = rewards.shape # pylint: disable=invalid-name
for t in range(T):
d.append(rewards[:, t] + (gamma * predicted_values[:, t + 1]) -
predicted_values[:, t])
return np.array(d).T * mask
def gae_advantages(td_deltas, mask, lambda_=0.95, gamma=0.99):
  r"""Computes GAE advantage estimates from one-step TD-residuals.

  A_{bt} = \sum_{l=0}^{\infty}(\gamma * \lambda)^{l}(\delta_{b,t+l})

  This is exactly a rewards-to-go computation with discount gamma * lambda,
  so we delegate to `rewards_to_go`.

  Args:
    td_deltas: np.ndarray of shape (B, T) of one-step TD-residuals.
    mask: np.ndarray of shape (B, T) masking the residuals; they may already
      be masked if produced by `deltas(...)`.
    lambda_: float, GAE lambda parameter.
    gamma: float, discount factor.

  Returns:
    GAE advantage estimates, np.ndarray of shape (B, T).
  """
  combined_discount = lambda_ * gamma
  return rewards_to_go(td_deltas, mask, combined_discount)
def chosen_probabs(probab_observations, actions):
  """Gathers the log-probabilities of the actions that were actually taken.

  Args:
    probab_observations: ndarray of shape `[B, T+1, A]`, where
      probab_observations[b, t, i] is the log-probability of action i at
      time-step t of trajectory b.
    actions: ndarray of shape `[B, T]`, entries in [0, A) naming the chosen
      action at each time-step.

  Returns:
    `[B, T]` ndarray of log-probabilities of the chosen actions.
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == probab_observations.shape[:2]
  # Advanced indexing: the (B, 1) batch index broadcasts against the (T,)
  # time index, selecting one action per (batch, time) cell.
  batch_index = np.arange(B)[:, None]
  time_index = np.arange(T)
  return probab_observations[batch_index, time_index, actions]
def compute_probab_ratios(p_new, p_old, actions, reward_mask):
  """Computes per-time-step probability ratios between new and old policies.

  Args:
    p_new: ndarray of shape [B, T+1, A]; log-probabilities over all actions
      under the new policy parameters.
    p_old: ndarray of shape [B, T+1, A]; same, under the old parameters.
    actions: ndarray of shape [B, T], entries in [0, A).
    reward_mask: ndarray of shape [B, T]; 0 on padded time-steps.

  Returns:
    ndarray of shape [B, T] where entry (b, t) is
    p_new_{b,t,action_{b,t}} / p_old_{b,t,action_{b,t}}.
  """
  B, T = actions.shape  # pylint: disable=invalid-name
  assert (B, T + 1) == p_old.shape[:2]
  assert (B, T + 1) == p_new.shape[:2]
  # Log-probs of the actions actually taken, under each set of parameters.
  logp_old = chosen_probabs(p_old, actions)
  logp_new = chosen_probabs(p_new, actions)
  assert (B, T) == logp_old.shape
  assert (B, T) == logp_new.shape
  # exp(log p_new - log p_old) == p_new / p_old.
  ratios = np.exp(logp_new - logp_old) * reward_mask
  assert (B, T) == ratios.shape
  return ratios
def clipped_probab_ratios(probab_ratios, epsilon=0.2):
  """Clamps the probability ratios to the PPO band [1-epsilon, 1+epsilon]."""
  return np.minimum(np.maximum(probab_ratios, 1 - epsilon), 1 + epsilon)
def clipped_objective(probab_ratios, advantages, reward_mask, epsilon=0.2):
  """Pessimistic (element-wise min) PPO surrogate objective, masked."""
  unclipped = probab_ratios * advantages
  clipped = clipped_probab_ratios(probab_ratios, epsilon=epsilon) * advantages
  return np.minimum(unclipped, clipped) * reward_mask
def get_time(t1, t2=None):
  """Milliseconds elapsed from `t1` to `t2` (now, if omitted), 2-dp rounded."""
  end = time.time() if t2 is None else t2
  return round(1000 * (end - t1), 2)
def approximate_kl(log_prob_new, log_prob_old, mask):
  """Computes the approximate KL divergence between old and new log-probs.

  Args:
    log_prob_new: (B, T+1, A) log-probs under the new parameters.
    log_prob_old: (B, T+1, A) log-probs under the old parameters.
    mask: (B, T) mask; 0 on padded time-steps.

  Returns:
    Approximate KL, averaged over the non-masked entries.
  """
  # Drop the trailing time-step so the difference lines up with the mask.
  diff = (log_prob_old - log_prob_new)[:, :-1]
  # Mask reshaped to (B, T, 1) broadcasts over the action axis.
  diff = diff * mask[:, :, np.newaxis]
  return np.sum(diff) / np.sum(mask)
def masked_entropy(log_probs, mask):
  """Computes the entropy of the given log-probs over the unmasked steps.

  Args:
    log_probs: (B, T+1, A) log-probs.
    mask: (B, T) mask; 0 on padded time-steps.

  Returns:
    Entropy, averaged over the non-masked entries.
  """
  mask3 = mask[:, :, np.newaxis]  # (B, T, 1), broadcasts over the action axis
  # Drop the trailing time-step and zero-out the padded entries.
  lp = log_probs[:, :-1] * mask3
  p = np.exp(lp) * mask3  # (B, T, A) probabilities, masked
  # Entropy = -sum(p * log p), averaged over the non-masked part.
  return -(np.sum(lp * p) / np.sum(mask))
def evaluate_policy(eval_env,
                    get_predictions,
                    boundary,
                    max_timestep=20000,
                    rng=None):
  """Plays `eval_env` under several sampling policies; returns mean rewards.

  Args:
    eval_env: EnvProblem environment to evaluate on.
    get_predictions: observations -> (log-probs, value predictions, key).
    boundary: int, padding boundary for the env.
    max_timestep: int, cap on time-steps per evaluation episode.
    rng: jax rng, splittable.

  Returns:
    dict mapping sampling-policy name to its mean total reward.
  """
  sampling_policies = (
      env_problem_utils.CATEGORICAL_SAMPLING,
      env_problem_utils.GUMBEL_SAMPLING,
      env_problem_utils.EPSILON_GREEDY,
  )
  avg_rewards = {}
  for sampling in sampling_policies:
    trajectories, _ = env_problem_utils.play_env_problem_with_policy(
        eval_env,
        get_predictions,
        boundary=boundary,
        max_timestep=max_timestep,
        reset=True,
        policy_sampling=sampling,
        rng=rng)
    # Mean over trajectories of the total (summed) reward.
    total_reward = sum(np.sum(traj[2]) for traj in trajectories)
    avg_rewards[sampling] = float(total_reward) / len(trajectories)
  return avg_rewards
def maybe_restore_params(output_dir, policy_and_value_net_params):
  """Maybe restore the params from the checkpoint dir.

  Args:
    output_dir: Directory where saved model checkpoints are stored.
    policy_and_value_net_params: Default params, returned if model isn't found.

  Returns:
    triple (restore (bool), params, iter(int)) where iter is the epoch from
    which we restored the params, 0 is restore = False.
  """
  model_files = gfile.glob(os.path.join(output_dir, "model-??????.pkl"))
  if not model_files:
    return False, policy_and_value_net_params, 0

  # Restore from the latest (lexicographically largest) checkpoint.
  model_file = sorted(model_files)[-1]
  model_file_basename = os.path.basename(model_file)  # model-??????.pkl
  # BUG FIX: in Python 3 `filter` returns an iterator, so the original
  # `int(filter(str.isdigit, ...))` raised TypeError; join the digits first.
  i = int("".join(filter(str.isdigit, model_file_basename)))
  with gfile.GFile(model_file, "rb") as f:
    policy_and_value_net_params = pickle.load(f)
  return True, policy_and_value_net_params, i
def training_loop(
    env=None,
    epochs=EPOCHS,
    policy_and_value_net_fun=None,
    policy_and_value_optimizer_fun=None,
    batch_size=BATCH_TRAJECTORIES,
    num_optimizer_steps=NUM_OPTIMIZER_STEPS,
    print_every_optimizer_steps=PRINT_EVERY_OPTIMIZER_STEP,
    target_kl=0.01,
    boundary=20,
    max_timestep=None,
    max_timestep_eval=20000,
    random_seed=None,
    gamma=GAMMA,
    lambda_=LAMBDA,
    epsilon=EPSILON,
    c1=1.0,
    c2=0.01,
    output_dir=None,
    eval_every_n=1000,
    eval_env=None,
    done_frac_for_policy_save=0.5,
    enable_early_stopping=True,
    env_name=None,
):
  """Runs the training loop for PPO, with fixed policy and value nets.

  Each epoch: (optionally) evaluate the policy, collect `batch_size`
  trajectories, pad them, then run up to `num_optimizer_steps` optimizer
  steps on the combined policy+value loss, early-stopping when the
  approximate KL to the collection-time policy exceeds 1.5 * `target_kl`.
  Checkpoints and TensorBoard summaries are written under `output_dir`.

  Args:
    env: EnvProblem used to collect training trajectories; required.
    epochs: int, number of outer PPO iterations.
    policy_and_value_net_fun: (rng, obs_shape, num_actions) -> (params, apply).
    policy_and_value_optimizer_fun: params -> (opt_state, opt_update,
      get_params) triple.
    batch_size: int, trajectories collected per epoch.
    num_optimizer_steps: int, max optimizer steps per epoch.
    print_every_optimizer_steps: int, loss-logging cadence inside the
      optimization loop.
    target_kl: float, approximate-KL threshold for early stopping.
    boundary: int, padding boundary for trajectories.
    max_timestep: int or None, time-step cap while collecting.
    max_timestep_eval: int, time-step cap during evaluation.
    random_seed: seed forwarded to trax's rng setup (None for default).
    gamma: float, discount factor (forwarded to `combined_loss`).
    lambda_: float, GAE lambda (forwarded to `combined_loss`).
    epsilon: float, PPO clipping parameter (held constant; see note below).
    c1: float, coefficient forwarded to `combined_loss` — presumably the
      value-loss weight (defined elsewhere in this file; confirm there).
    c2: float, coefficient forwarded to `combined_loss` — presumably the
      entropy-bonus weight (confirm in `combined_loss`).
    output_dir: directory for checkpoints and summaries; required.
    eval_every_n: int, epochs between policy evaluations (also the minimum
      gap between checkpoint saves).
    eval_env: EnvProblem used for evaluation.
    done_frac_for_policy_save: float, fraction of `batch_size` finished
      episodes that makes a checkpoint eligible for saving.
    enable_early_stopping: bool, whether KL-based early stopping is active.
    env_name: str, environment name recorded in the summaries; required.
  """
  assert env
  assert output_dir
  assert env_name
  gfile.makedirs(output_dir)
  # Create summary writers and history.
  train_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "train"))
  timing_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "timing"))
  eval_sw = jaxboard.SummaryWriter(os.path.join(output_dir, "eval"))
  train_sw.text("env_name", env_name)
  timing_sw.text("env_name", env_name)
  eval_sw.text("env_name", env_name)
  jax_rng_key = trax.get_random_number_generator_and_set_seed(random_seed)
  # Batch Observations Shape = [-1, -1] + OBS, because we will eventually call
  # policy and value networks on shape [B, T] +_OBS
  batch_observations_shape = (-1, -1) + env.observation_space.shape
  assert isinstance(env.action_space, gym.spaces.Discrete)
  num_actions = env.action_space.n
  jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
  # Initialize the policy and value network.
  policy_and_value_net_params, policy_and_value_net_apply = (
      policy_and_value_net_fun(key1, batch_observations_shape, num_actions))
  # Maybe restore the policy params. If there is nothing to restore, then
  # iteration = 0 and policy_and_value_net_params are returned as is.
  restore, policy_and_value_net_params, iteration = (
      maybe_restore_params(output_dir, policy_and_value_net_params))
  if restore:
    logging.info("Restored parameters from iteration [%d]", iteration)
    # We should start from the next iteration.
    iteration += 1
  policy_and_value_net_apply = jit(policy_and_value_net_apply)
  # Initialize the optimizers.
  policy_and_value_optimizer = (
      policy_and_value_optimizer_fun(policy_and_value_net_params))
  (policy_and_value_opt_state, policy_and_value_opt_update,
   policy_and_value_get_params) = policy_and_value_optimizer
  num_trajectories_done = 0
  last_saved_at = 0
  logging.info("Starting the PPO training loop.")
  for i in range(iteration, epochs):
    epoch_start_time = time.time()
    # Params we'll use to collect the trajectories.
    policy_and_value_net_params = policy_and_value_get_params(
        policy_and_value_opt_state)
    # A function to get the policy and value predictions.
    def get_predictions(observations, rng=None):
      """Returns log-probs, value predictions and key back."""
      key, key1 = jax_random.split(rng, num=2)
      log_probs, value_preds = policy_and_value_net_apply(
          observations, policy_and_value_net_params, rng=key1)
      return log_probs, value_preds, key
    # Evaluate the policy.
    policy_eval_start_time = time.time()
    if ((i + 1) % eval_every_n == 0) or (i == epochs - 1):
      jax_rng_key, key = jax_random.split(jax_rng_key, num=2)
      logging.vlog(1, "Epoch [% 6d] evaluating policy.", i)
      avg_reward = evaluate_policy(
          eval_env,
          get_predictions,
          boundary,
          max_timestep=max_timestep_eval,
          rng=key)
      for k, v in avg_reward.items():
        eval_sw.scalar("eval/mean_reward/%s" % k, v, step=i)
        logging.info("Epoch [% 6d] Policy Evaluation [%s] = %10.2f", i, k, v)
    policy_eval_time = get_time(policy_eval_start_time)
    # --- Trajectory collection ---
    trajectory_collection_start_time = time.time()
    logging.vlog(1, "Epoch [% 6d] collecting trajectories.", i)
    jax_rng_key, key = jax_random.split(jax_rng_key)
    trajs, num_done = collect_trajectories(
        env,
        policy_fun=get_predictions,
        num_trajectories=batch_size,
        max_timestep=max_timestep,
        boundary=boundary,
        rng=key,
        reset=(i == 0) or restore,
        epsilon=(10.0 / (i + 10.0)))  # this is a different epsilon.
    trajectory_collection_time = get_time(trajectory_collection_start_time)
    logging.vlog(1, "Collecting trajectories took %0.2f msec.",
                 trajectory_collection_time)
    avg_reward = float(sum(np.sum(traj[2]) for traj in trajs)) / len(trajs)
    max_reward = max(np.sum(traj[2]) for traj in trajs)
    min_reward = min(np.sum(traj[2]) for traj in trajs)
    train_sw.scalar("train/mean_reward", avg_reward, step=i)
    logging.vlog(1, "Rewards avg=[%0.2f], max=[%0.2f], min=[%0.2f], all=%s",
                 avg_reward, max_reward, min_reward,
                 [float(np.sum(traj[2])) for traj in trajs])
    logging.vlog(1,
                 "Trajectory Length average=[%0.2f], max=[%0.2f], min=[%0.2f]",
                 float(sum(len(traj[0]) for traj in trajs)) / len(trajs),
                 max(len(traj[0]) for traj in trajs),
                 min(len(traj[0]) for traj in trajs))
    logging.vlog(2, "Trajectory Lengths: %s", [len(traj[0]) for traj in trajs])
    # --- Pad trajectories to a common bucketed length ---
    padding_start_time = time.time()
    (_, reward_mask, padded_observations, padded_actions,
     padded_rewards) = pad_trajectories(
         trajs, boundary=boundary)
    padding_time = get_time(padding_start_time)
    logging.vlog(1, "Padding trajectories took %0.2f msec.",
                 get_time(padding_start_time))
    logging.vlog(1, "Padded Observations' shape [%s]",
                 str(padded_observations.shape))
    logging.vlog(1, "Padded Actions' shape [%s]", str(padded_actions.shape))
    logging.vlog(1, "Padded Rewards' shape [%s]", str(padded_rewards.shape))
    # Calculate log-probabilities and value predictions of the trajectories.
    # We'll pass these to the loss functions so as to not get recomputed.
    # NOTE:
    # There is a slight problem here, if the policy network contains
    # stochasticity in the log-probabilities (ex: dropout), then calculating
    # these again here is not going to be correct and should be done in the
    # collect function.
    log_prob_recompute_start_time = time.time()
    jax_rng_key, key = jax_random.split(jax_rng_key)
    log_probabs_traj, value_predictions_traj, _ = get_predictions(
        padded_observations, rng=key)
    log_prob_recompute_time = get_time(log_prob_recompute_start_time)
    # Some assertions.
    B, T = padded_actions.shape  # pylint: disable=invalid-name
    assert (B, T) == padded_rewards.shape
    assert (B, T) == reward_mask.shape
    assert (B, T + 1) == padded_observations.shape[:2]
    assert (B, T + 1) + env.observation_space.shape == padded_observations.shape
    # Linear annealing from 0.1 to 0.0
    # epsilon_schedule = epsilon if epochs == 1 else epsilon * (1.0 -
    #                                                           (i /
    #                                                            (epochs - 1)))
    # Constant epsilon.
    epsilon_schedule = epsilon
    # Compute value and ppo losses.
    # NOTE: `combined_loss` is defined elsewhere in this file (not visible
    # here); it returns (combined, ppo, value, entropy) losses.
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(2, "Starting to compute P&V loss.")
    loss_compute_start_time = time.time()
    cur_combined_loss, cur_ppo_loss, cur_value_loss, entropy_bonus = (
        combined_loss(
            policy_and_value_net_params,
            log_probabs_traj,
            value_predictions_traj,
            policy_and_value_net_apply,
            padded_observations,
            padded_actions,
            padded_rewards,
            reward_mask,
            gamma=gamma,
            lambda_=lambda_,
            epsilon=epsilon_schedule,
            c1=c1,
            c2=c2,
            rng=key1))
    loss_compute_time = get_time(loss_compute_start_time)
    logging.vlog(
        1,
        "Calculating P&V loss [%10.2f(%10.2f, %10.2f, %10.2f)] took %0.2f msec.",
        cur_combined_loss, cur_value_loss, cur_ppo_loss, entropy_bonus,
        get_time(loss_compute_start_time))
    # --- Inner optimization loop over the collected batch ---
    jax_rng_key, key1 = jax_random.split(jax_rng_key, num=2)
    logging.vlog(1, "Policy and Value Optimization")
    optimization_start_time = time.time()
    keys = jax_random.split(key1, num=num_optimizer_steps)
    # NOTE(review): if num_optimizer_steps == 0, `loss_combined` (used in the
    # epoch summary below) is never assigned — confirm callers never pass 0.
    for j in range(num_optimizer_steps):
      k1, k2, k3 = jax_random.split(keys[j], num=3)
      t = time.time()
      # Update the optimizer state.
      policy_and_value_opt_state = policy_and_value_opt_step(
          j,
          policy_and_value_opt_state,
          policy_and_value_opt_update,
          policy_and_value_get_params,
          policy_and_value_net_apply,
          log_probabs_traj,
          value_predictions_traj,
          padded_observations,
          padded_actions,
          padded_rewards,
          reward_mask,
          c1=c1,
          c2=c2,
          gamma=gamma,
          lambda_=lambda_,
          epsilon=epsilon_schedule,
          rng=k1)
      # Compute the approx KL for early stopping.
      new_policy_and_value_net_params = policy_and_value_get_params(
          policy_and_value_opt_state)
      log_probab_actions_new, _ = policy_and_value_net_apply(
          padded_observations, new_policy_and_value_net_params, rng=k2)
      approx_kl = approximate_kl(log_probab_actions_new, log_probabs_traj,
                                 reward_mask)
      early_stopping = enable_early_stopping and approx_kl > 1.5 * target_kl
      if early_stopping:
        logging.vlog(
            1, "Early stopping policy and value optimization at iter: %d, "
            "with approx_kl: %0.2f", j, approx_kl)
        # We don't return right-away, we want the below to execute on the last
        # iteration.
      t2 = time.time()
      if (((j + 1) % print_every_optimizer_steps == 0) or
          (j == num_optimizer_steps - 1) or early_stopping):
        # Compute and log the loss.
        (loss_combined, loss_ppo, loss_value, entropy_bonus) = (
            combined_loss(
                new_policy_and_value_net_params,
                log_probabs_traj,
                value_predictions_traj,
                policy_and_value_net_apply,
                padded_observations,
                padded_actions,
                padded_rewards,
                reward_mask,
                gamma=gamma,
                lambda_=lambda_,
                epsilon=epsilon_schedule,
                c1=c1,
                c2=c2,
                rng=k3))
        logging.vlog(1, "One Policy and Value grad desc took: %0.2f msec",
                     get_time(t, t2))
        logging.vlog(
            1, "Combined Loss(value, ppo, entropy_bonus) [%10.2f] ->"
            " [%10.2f(%10.2f,%10.2f,%10.2f)]", cur_combined_loss, loss_combined,
            loss_value, loss_ppo, entropy_bonus)
      if early_stopping:
        break
    optimization_time = get_time(optimization_start_time)
    logging.vlog(
        1, "Total Combined Loss reduction [%0.2f]%%",
        (100 * (cur_combined_loss - loss_combined) / np.abs(cur_combined_loss)))
    # Save parameters every time we see the end of at least a fraction of batch
    # number of trajectories that are done (not completed -- completed includes
    # truncated and done).
    # Also don't save too frequently, enforce a minimum gap.
    # Or if this is the last iteration.
    policy_save_start_time = time.time()
    num_trajectories_done += num_done
    if (((num_trajectories_done >= done_frac_for_policy_save * batch_size)
         and (i - last_saved_at > eval_every_n)) or (i == epochs - 1)):
      logging.vlog(1, "Epoch [% 6d] saving model.", i)
      params_file = os.path.join(output_dir, "model-%06d.pkl" % i)
      with gfile.GFile(params_file, "wb") as f:
        pickle.dump(policy_and_value_net_params, f)
      # Reset this number.
      num_trajectories_done = 0
      last_saved_at = i
    policy_save_time = get_time(policy_save_start_time)
    epoch_time = get_time(epoch_start_time)
    logging.info(
        "Epoch [% 6d], Reward[min, max, avg] [%5.2f,%5.2f,%5.2f], Combined"
        " Loss(value, ppo, entropy) [%2.5f(%2.5f,%2.5f,%2.5f)]", i, min_reward,
        max_reward, avg_reward, loss_combined, loss_value, loss_ppo,
        entropy_bonus)
    # Per-phase wall-clock timings (msec) for this epoch.
    timing_dict = {
        "epoch": epoch_time,
        "policy_eval": policy_eval_time,
        "trajectory_collection": trajectory_collection_time,
        "padding": padding_time,
        "log_prob_recompute": log_prob_recompute_time,
        "loss_compute": loss_compute_time,
        "optimization": optimization_time,
        "policy_save": policy_save_time,
    }
    for k, v in timing_dict.items():
      timing_sw.scalar("timing/%s" % k, v, step=i)
    max_key_len = max(len(k) for k in timing_dict)
    timing_info_list = [
        "%s : % 10.2f" % (k.rjust(max_key_len + 1), v)
        for k, v in sorted(timing_dict.items())
    ]
    logging.info("Epoch [% 6d], Timings: \n%s", i, "\n".join(timing_info_list))
    # Reset restore.
    restore = False
    # Flush summary writers once in a while.
    if (i+1) % 1000 == 0 or i == epochs - 1:
      train_sw.flush()
      timing_sw.flush()
      eval_sw.flush()
| 35.255666 | 81 | 0.649806 |
bce5b76758741bd43e051c43114fa45c1ec64384
| 9,421 |
py
|
Python
|
models/cal.py
|
SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring
|
059f0063c1493c19b4f45fa27d13adaeb6b2b2d7
|
[
"MIT"
] | null | null | null |
models/cal.py
|
SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring
|
059f0063c1493c19b4f45fa27d13adaeb6b2b2d7
|
[
"MIT"
] | null | null | null |
models/cal.py
|
SudoRmFr/The-Nature-Conservancy-Fisheries-Monitoring
|
059f0063c1493c19b4f45fa27d13adaeb6b2b2d7
|
[
"MIT"
] | null | null | null |
"""
WS-DAN models
Hu et al.,
"See Better Before Looking Closer: Weakly Supervised Data Augmentation Network for Fine-Grained Visual Classification",
arXiv:1901.09891
"""
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import models.resnet as resnet
from models.inception import inception_v3, BasicConv2d
import models.coatnet as coatnet
import random
__all__ = ['WSDAN_CAL']
EPSILON = 1e-6
# Bilinear Attention Pooling
| 39.751055 | 135 | 0.604713 |
bce6db15719682d4f24dcfd6984365aab4377658
| 1,526 |
py
|
Python
|
tests/walls/analytic/plates.py
|
noabauma/Mirheo
|
bf7979bfbbf402d33c26ac5dc879f880e78e7017
|
[
"MIT"
] | null | null | null |
tests/walls/analytic/plates.py
|
noabauma/Mirheo
|
bf7979bfbbf402d33c26ac5dc879f880e78e7017
|
[
"MIT"
] | null | null | null |
tests/walls/analytic/plates.py
|
noabauma/Mirheo
|
bf7979bfbbf402d33c26ac5dc879f880e78e7017
|
[
"MIT"
] | 1 |
2021-07-14T13:24:05.000Z
|
2021-07-14T13:24:05.000Z
|
#!/usr/bin/env python
# Mirheo DPD simulation of flow between two parallel plates: a constant body
# force along x drives the solvent confined by walls at z = 1 and z = domain-1.
import mirheo as mir
dt = 0.001                # integration time-step
ranks = (1, 1, 1)         # MPI rank layout
domain = (8, 16, 8)       # domain size (x, y, z)
force = (1.0, 0, 0)       # constant body force driving the flow along x
density = 4               # solvent number density
u = mir.Mirheo(ranks, domain, dt, debug_level=3, log_filename='log', no_splash=True)
# Solvent particles, uniformly seeded at the target density.
pv = mir.ParticleVectors.ParticleVector('pv', mass = 1)
ic = mir.InitialConditions.Uniform(number_density=density)
u.registerParticleVector(pv=pv, ic=ic)
# DPD pairwise interaction for the solvent.
dpd = mir.Interactions.Pairwise('dpd', rc=1.0, kind="DPD", a=10.0, gamma=50.0, kBT=1.0, power=0.5)
u.registerInteraction(dpd)
# Two plane walls normal to z, one unit away from each domain face.
plate_lo = mir.Walls.Plane("plate_lo", (0, 0, -1), (0, 0, 1))
plate_hi = mir.Walls.Plane("plate_hi", (0, 0, 1), (0, 0, domain[2] - 1))
u.registerWall(plate_lo, 0)
u.registerWall(plate_hi, 0)
vv = mir.Integrators.VelocityVerlet("vv")
# Freeze a layer of particles inside the walls so they interact with the
# solvent via the same DPD kernel.
frozen = u.makeFrozenWallParticles(pvName="plates", walls=[plate_lo, plate_hi], interactions=[dpd], integrator=vv, number_density=density)
u.setWall(plate_lo, pv)
u.setWall(plate_hi, pv)
for p in (pv, frozen):
    u.setInteraction(dpd, p, pv)
# Integrate the solvent with the constant driving force applied.
vv_dp = mir.Integrators.VelocityVerlet_withConstForce("vv_dp", force)
u.registerIntegrator(vv_dp)
u.setIntegrator(vv_dp, pv)
# Periodically dump binned average velocities to HDF5.
sample_every = 2
dump_every = 1000
bin_size = (1., 1., 0.5)
u.registerPlugins(mir.Plugins.createDumpAverage('field', [pv], sample_every, dump_every, bin_size, ["velocities"], 'h5/solvent-'))
u.run(7002)
# nTEST: walls.analytic.plates
# cd walls/analytic
# rm -rf h5
# mir.run --runargs "-n 2" ./plates.py
# mir.avgh5 xy velocities h5/solvent-0000[4-7].h5 | awk '{print $1}' > profile.out.txt
| 27.745455 | 138 | 0.692005 |
bce8b5c80fccdda525f4313d1b8dac7df83862d2
| 3,765 |
py
|
Python
|
scraper-code/myanimelist/base.py
|
XueAlfred/MALAnalysis
|
630d578b30f7540769774e1e4ee072d9775bf4bf
|
[
"MIT"
] | 15 |
2015-01-24T10:52:42.000Z
|
2021-08-09T10:23:58.000Z
|
scraper-code/myanimelist/base.py
|
XueAlfred/MALAnalysis
|
630d578b30f7540769774e1e4ee072d9775bf4bf
|
[
"MIT"
] | 10 |
2015-01-24T10:51:18.000Z
|
2018-09-05T00:17:03.000Z
|
scraper-code/myanimelist/base.py
|
XueAlfred/MALAnalysis
|
630d578b30f7540769774e1e4ee072d9775bf4bf
|
[
"MIT"
] | 18 |
2015-01-24T11:29:38.000Z
|
2021-12-04T02:41:09.000Z
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import abc
import bs4
import functools
import utilities
def loadable(func_name):
"""Decorator for getters that require a load() upon first access.
:type func_name: function
:param func_name: class method that requires that load() be called if the class's _attribute value is None
:rtype: function
:return: the decorated class method.
"""
return inner
| 27.683824 | 127 | 0.662151 |
bce99600c76ca2d8cb9cc68164960e5e44460216
| 624 |
py
|
Python
|
p2/core/http.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
p2/core/http.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
p2/core/http.py
|
BeryJu/p2
|
80b5c6a821f90cef73d6e8cd3c6cdb05ffa86b27
|
[
"MIT"
] | null | null | null |
"""p2 core http responses"""
from wsgiref.util import FileWrapper
from django.http import StreamingHttpResponse
from p2.core.constants import ATTR_BLOB_MIME, ATTR_BLOB_SIZE_BYTES
from p2.core.models import Blob
| 36.705882 | 96 | 0.758013 |
bcea70bd02cca28f65bc7151eb0b0e69448cc1e4
| 7,566 |
py
|
Python
|
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | 1 |
2019-12-11T02:33:23.000Z
|
2019-12-11T02:33:23.000Z
|
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | 10 |
2020-01-29T17:06:01.000Z
|
2021-05-31T14:41:19.000Z
|
lattedb/project/formfac/migrations/0009_auto_20200528_0907.py
|
callat-qcd/lattedb
|
75c06748f3d59332a84ec1b5794c215c5974a46f
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.0.6 on 2020-05-28 09:07
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
| 64.666667 | 239 | 0.690193 |
bceaba57987d2038b2b3f984d0fa700547f6902c
| 12,224 |
py
|
Python
|
SIO_Code/SIO_coherence.py
|
mmstoll/Ocean569_Code
|
228cb719f3e82f187f704f343d3b3590a38236d7
|
[
"MIT"
] | null | null | null |
SIO_Code/SIO_coherence.py
|
mmstoll/Ocean569_Code
|
228cb719f3e82f187f704f343d3b3590a38236d7
|
[
"MIT"
] | null | null | null |
SIO_Code/SIO_coherence.py
|
mmstoll/Ocean569_Code
|
228cb719f3e82f187f704f343d3b3590a38236d7
|
[
"MIT"
] | null | null | null |
"""
Data: Temperature and Salinity time series from SIO Scripps Pier
Salinity: measured in PSU at the surface (~0.5m) and at depth (~5m)
Temp: measured in degrees C at the surface (~0.5m) and at depth (~5m)
- Timestamp included beginning in 1990
"""
# imports
import sys,os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from scipy import signal
import scipy.stats as ss
import SIO_modules as SIO_mod
from importlib import reload
reload(SIO_mod)
# read in temp and sal files
sal_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_SALT_1916-201905.txt', sep='\t', skiprows = 27)
temp_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/SIO_TEMP_1916_201905.txt', sep='\t', skiprows = 26)
ENSO_data = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_data.xlsx')
ENSO_data_recent = pd.read_excel('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_ENSO_recent_data.xlsx')
PDO_data = pd.read_csv('/Users/MMStoll/Python/Data/Ocean569_Data/SIO_Data/NOAA_PDO_data.csv', skiprows = 1)
path_out = '/Users/MMStoll/Python/Output/Ocean569_Output/SIO_Output/'
# convert year, month, day columns to single DATE column
sal_data['DATE'] = pd.to_datetime(sal_data[['YEAR', 'MONTH', 'DAY']])
temp_data['DATE'] = pd.to_datetime(temp_data[['YEAR', 'MONTH', 'DAY']])
ENSO_data_all = ENSO_data.append(ENSO_data_recent[323:], ignore_index = True)
PDO_data['DATE'] = pd.to_datetime(PDO_data['Date'], format='%Y%m')
# remove uncertain data(SURF_FLAG between 1 and 4), replace with NaN, then interpolate
for i in range(0,len(sal_data['SURF_SAL_PSU'])):
if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
sal_data['SURF_SAL_PSU'][i] = np.nan
for i in range(0,len(temp_data['SURF_TEMP_C'])):
if (sal_data['SURF_FLAG'][i] >= 1) and (sal_data['SURF_FLAG'][i] <=4):
sal_data['SURF_SAL_PSU'][i] = np.nan
# interpolate missing temp and sal data
sal_data['SURF_SAL_PSU'] = sal_data['SURF_SAL_PSU'].interpolate()
temp_data['SURF_TEMP_C'] = temp_data['SURF_TEMP_C'].interpolate()
sal_data['SURF_SAL_PSU'][0] = sal_data['SURF_SAL_PSU'][1]
# remove the average from the sal and temp data and create new columns
sal_data['SURF_SAL_PSU_NOAVG'] = sal_data['SURF_SAL_PSU'] - sal_data['SURF_SAL_PSU'].mean()
temp_data['SURF_TEMP_C_NOAVG'] = temp_data['SURF_TEMP_C'] - temp_data['SURF_TEMP_C'].mean()
# remove trends from the sal and temp data and create new columns
sal_fit = np.polyfit(sal_data.index,sal_data['SURF_SAL_PSU_NOAVG'],1)
sal_fit_fn = np.poly1d(sal_fit)
temp_fit = np.polyfit(temp_data.index,temp_data['SURF_TEMP_C_NOAVG'],1)
temp_fit_fn = np.poly1d(temp_fit)
sal_fit_value = sal_fit_fn(sal_data.index)
temp_fit_value = temp_fit_fn(temp_data.index)
sal_data['SURF_SAL_PSU_DETREND'] = sal_data['SURF_SAL_PSU_NOAVG'] - sal_fit_value
temp_data['SURF_TEMP_C_DETREND'] = temp_data['SURF_TEMP_C_NOAVG'] - temp_fit_value
sal_tri = sal_data['SURF_SAL_PSU_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
temp_tri = temp_data['SURF_TEMP_C_DETREND'].rolling(center = True, window = 30, min_periods = 3, win_type = 'triang').mean()
# # 1. FFT the SIO Data
# t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_data['SURF_TEMP_C_DETREND'])
# # 2. Apply butterworth filter to SIO data, with cutoff equal to nyquist freq of enso index
# fs = 1 # sampling frequency, once per day
# fc = 1/60 # cut-off frequency of the filter (cut off periods shorter than 60 days)
# w = fc / (fs / 2) #normalize the frequency
# b, a = signal.butter(4, w, 'low')
# temp_output = signal.filtfilt(b, a, t_spec)
# # 3. Inverse FFT of filtered SIO data
# temp_ifft = np.fft.irfft(temp_output,n=len(temp_output))
# # 4. Subsample new SIO time series with same delta t as ENSO index (once per month)
# temp_ifft_sampled = np.mean(temp_ifft[0:18750].reshape(-1, 30), axis=1)
# temp_ifft_len = temp_ifft_sampled[0:618]
# x = np.linspace(0,18770, 18770)
# plt.figure()
# plt.loglog(x, temp_ifft)
# plt.show()
# butterworth low pass filter for temperature and salinity
fs = 1 # sampling frequency, once per day
fc = 1/500 # cut-off frequency of the filter (cut off periods shorter than 500 days)
w = fc / (fs / 2) #normalize the frequency
b, a = signal.butter(4, w, 'low')
temp_output = signal.filtfilt(b, a, temp_tri)
sal_output = signal.filtfilt(b, a, sal_tri)
temp_sampled = np.mean(temp_output[0:37530].reshape(-1, 30), axis=1) #length = 1251
# create dataframe with spectra for each variable
spectra_temp_df = pd.DataFrame(columns = ['Temp_freq', 'Temp_spec', 'Temp_fft'])
spectra_sal_df = pd.DataFrame(columns = ['Sal_freq', 'Sal_spec', 'Sal_fft'])
spectra_PDO_df = pd.DataFrame(columns = ['PDO_freq', 'PDO_spec', 'PDO_fft'])
spectra_ENSO_df = pd.DataFrame(columns = ['ENSO_freq', 'ENSO_spec', 'ENSO_fft'])
# for coherence, start all records at 1916-01-01
# ENSO data [20:] 1916-09-01 onward, monthly// ends now, through 2019-05-01 [:1254]
# Temp data [10:] 1916-09-01 onward, daily // ends 2019-05-31
# PDO data [752:] 1916-09-01 onward, monthly// ends now, thorugh 2019-05-01 [:1985]
# compute spectral variables for each variable
for j in range(0,4):
data_sets = [temp_sampled, sal_data['SURF_SAL_PSU_DETREND'], PDO_data['Value'][743:], ENSO_data_all['VALUE'][14:]]
freq, spec, spec_amp, fft, delt, freq_T, freq_nyquist = SIO_mod.var_fft(data_sets[j])
if j == 0:
spectra_temp_df['Temp_freq'] = freq
spectra_temp_df['Temp_spec'] = spec
spectra_temp_df['Temp_fft'] = fft
if j == 1:
spectra_sal_df['Sal_freq'] = freq
spectra_sal_df['Sal_spec'] = spec
spectra_sal_df['Sal_fft'] = fft
if j == 2:
spectra_PDO_df['PDO_freq'] = freq
spectra_PDO_df['PDO_spec'] = spec
spectra_PDO_df['PDO_fft'] = fft
if j == 3:
spectra_ENSO_df['ENSO_freq'] = freq
spectra_ENSO_df['ENSO_spec'] = spec
spectra_ENSO_df['ENSO_fft'] = fft
n_av = 5
# define terms to compute coherence between temp and ENSO
t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
e_spec_b,e_phase_b,e_freq_av_b,count=band_average(spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_fft'],spectra_ENSO_df['ENSO_freq'],n_av)
e_fft_star = np.conj(spectra_ENSO_df['ENSO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,e_fft_star,spectra_ENSO_df['ENSO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*e_spec_b)
# define colors
t_color = 'cadetblue'
s_color = 'darkslateblue'
p_color = 'seagreen'
e_color = 'steelblue'
freq_ann = 2*np.pi/365.25
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and ENSO Index \nCoherence and Phase'
im_name = 'SIO_TempENSO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = e_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{ENSO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = e_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{ENSO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
n_av = 5
# define terms to compute coherence between temp and ENSO
#t_freq,t_spec,t_spec_amp,t_fft,t_delt,t_freq_T,t_freq_nyquist = SIO_mod.var_fft(temp_sampled) #take fft/compute spectra of temp_sampled at 30 day intervals
#t_spec_b,t_phase_b,t_freq_av_b,count=band_average(t_fft,t_fft,t_freq,n_av)
p_spec_b,p_phase_b,p_freq_av_b,count=band_average(spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_fft'],spectra_PDO_df['PDO_freq'],n_av)
p_fft_star = np.conj(spectra_PDO_df['PDO_fft'])
cospec_amp2,cospec_phase2,freq_av2,count2=band_average(t_fft,p_fft_star,spectra_PDO_df['PDO_freq'],n_av)
coh_sq2=cospec_amp2**2/(t_spec_b*p_spec_b)
# plot the coherence and phase between ENSO and temperature
tstr = 'SIO Temperature and PDO Index \nCoherence and Phase'
im_name = 'SIO_TempPDO_CoherencePhase.jpg'
NR = 2; NC = 1
fig, axes = plt.subplots(nrows = NR,ncols=NC,figsize = (10,7))
axes[0].semilogx(freq_av2,coh_sq2, color = p_color)
axes[0].set_xlabel('$\omega$ (radians/day)')
axes[0].set_ylabel('Squared Coherence $\it{T}$-$\it{PDO}$')
axes[0].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.075, 0.1,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.00018, 0.1,'$\omega_o$', alpha = 0.5) #transform = ax.transAxes)
axes[0].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[0].text(0.0098, 0.1, 'Annual', alpha = 0.5)#transform = ax.transAxes)
axes[1].semilogx(freq_av2, cospec_phase2, color = p_color)
axes[1].set_xlabel('$\omega$ (radians/day)')
axes[1].set_ylabel('Phase $\it{T}$-$\it{PDO}$, degrees')
axes[1].axvline(t_freq_nyquist, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.075, -110,'$\omega_{max}$', alpha = 0.5) #transform = ax.transAxes)
axes[1].axvline(t_freq_T, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.00018, -110,'$\omega_o$', alpha = 0.5)#transform = ax.transAxes)
axes[1].axvline(freq_ann, color = 'black', linestyle = '--', alpha = 0.5)
axes[1].text(0.0098, -110, 'Annual', alpha = 0.5)#transform = ax.transAxes)
fig.suptitle(tstr)
# fig.tight_layout(pad=2.0)
plt.savefig(path_out + im_name)
plt.show()
| 47.015385 | 156 | 0.724967 |
bceb90c866742318115d3897625ab3cd17dad9ae
| 1,782 |
py
|
Python
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 7 |
2019-03-13T17:22:50.000Z
|
2022-01-09T09:03:16.000Z
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 1 |
2019-08-01T23:42:09.000Z
|
2019-08-02T16:14:31.000Z
|
abfs/group_data_split.py
|
rcdilorenzo/abfs
|
a897d00a4589a9412a9b9e737f8db91df008fc26
|
[
"MIT"
] | 2 |
2020-09-12T06:33:16.000Z
|
2021-01-01T01:05:48.000Z
|
from collections import namedtuple as Struct
from sklearn.model_selection import GroupShuffleSplit, ShuffleSplit
DataSplitConfig = Struct('DataSplitConfig', ['validation_size', 'test_size', 'random_seed'])
DEFAULT_SPLIT_CONFIG = DataSplitConfig(0.2, 0.2, 1337)
| 30.724138 | 92 | 0.640292 |
bcef12fc47d4a9fcc176c51b16eef241913a4acb
| 2,989 |
py
|
Python
|
mmcls/models/utils/se_layer.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 1,190 |
2020-07-10T01:16:01.000Z
|
2022-03-31T09:48:38.000Z
|
mmcls/models/utils/se_layer.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 702 |
2020-07-13T13:31:33.000Z
|
2022-03-31T06:48:04.000Z
|
mmcls/models/utils/se_layer.py
|
YuxinZou/mmclassification
|
2037260ea6c98a3b115e97727e1151a1c2c32f7a
|
[
"Apache-2.0"
] | 502 |
2020-07-10T02:40:55.000Z
|
2022-03-31T02:07:09.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from .make_divisible import make_divisible
| 39.853333 | 77 | 0.603546 |
bcef9b7b7442550783a878ff705f2b12e8b4982b
| 605 |
py
|
Python
|
instagram/admin.py
|
James19stack/instagram-copy_cat
|
996a8678cec84a05e97d803356194cd112ee53e6
|
[
"MIT"
] | null | null | null |
instagram/admin.py
|
James19stack/instagram-copy_cat
|
996a8678cec84a05e97d803356194cd112ee53e6
|
[
"MIT"
] | 7 |
2021-04-08T21:26:44.000Z
|
2022-03-12T00:40:52.000Z
|
instagram/admin.py
|
James19stack/instagram-copy_cat
|
996a8678cec84a05e97d803356194cd112ee53e6
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Images,Comments,Profile
# Register your models here.
admin.site.site_header='InstaPost Admin'
admin.site.site_title='InstaPost Admin Dashboard'
admin.site.register(Images,ImageInline)
admin.site.register(Profile)
| 24.2 | 49 | 0.676033 |
bcefbd3c4fdc15d991e7f75b480521ed8994a120
| 2,234 |
py
|
Python
|
mandelbruh/util.py
|
pereradrian/mandelbruh
|
fb68c5f2af84d51097e73f3a248e3a1b95fbbf47
|
[
"MIT"
] | null | null | null |
mandelbruh/util.py
|
pereradrian/mandelbruh
|
fb68c5f2af84d51097e73f3a248e3a1b95fbbf47
|
[
"MIT"
] | null | null | null |
mandelbruh/util.py
|
pereradrian/mandelbruh
|
fb68c5f2af84d51097e73f3a248e3a1b95fbbf47
|
[
"MIT"
] | null | null | null |
import numpy as np
_mandelbruh_GLOBAL_VARS = {}
| 21.68932 | 88 | 0.625783 |
bcf0d2ce383dabf5df66eb0e8657dcde75189cda
| 8,894 |
py
|
Python
|
core/recognizer.py
|
awen1988/yry
|
b65ccd7062d60f605fc978a87e060d0015cf1d4c
|
[
"Apache-2.0"
] | 129 |
2017-11-14T07:20:33.000Z
|
2021-06-18T07:07:18.000Z
|
core/recognizer.py
|
awen1988/yry
|
b65ccd7062d60f605fc978a87e060d0015cf1d4c
|
[
"Apache-2.0"
] | 10 |
2018-04-18T08:01:09.000Z
|
2018-08-17T02:57:33.000Z
|
core/recognizer.py
|
awen1988/yry
|
b65ccd7062d60f605fc978a87e060d0015cf1d4c
|
[
"Apache-2.0"
] | 35 |
2017-11-14T07:17:00.000Z
|
2021-01-21T08:10:07.000Z
|
"""
recognize face landmark
"""
import json
import os
import requests
import numpy as np
FACE_POINTS = list(range(0, 83))
JAW_POINTS = list(range(0, 19))
LEFT_EYE_POINTS = list(range(19, 29))
LEFT_BROW_POINTS = list(range(29, 37))
MOUTH_POINTS = list(range(37, 55))
NOSE_POINTS = list(range(55, 65))
RIGHT_EYE_POINTS = list(range(65, 75))
RIGHT_BROW_POINTS = list(range(75, 83))
LEFT_FACE = list(range(0, 10)) + list(range(29, 34))
RIGHT_FACE = list(range(9, 19)) + list(range(75, 80))
JAW_END = 19
FACE_START = 0
FACE_END = 83
OVERLAY_POINTS = [
LEFT_FACE,
RIGHT_FACE,
JAW_POINTS,
]
| 47.308511 | 103 | 0.619744 |
bcf263d3ef948ac8eb8afa3a601107434d608075
| 1,646 |
py
|
Python
|
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
magvar.py
|
rafidmorshedi/mag-dec-api
|
5daff929be8cad902f8db331090c0ed77f7bdef9
|
[
"MIT"
] | null | null | null |
import requests
import time
from bs4 import BeautifulSoup
import re
def get_mag_var(lat, lon, year, month, day, elev=0):
"""Returns the magnetic variation at a particulat point on earth.
Keyword Arguments
lat -- latitude (e.g. -180.6 deg)
lon -- longitude (e.g. -34.6 deg)
elev -- elevation in km (default 0.0)
year -- year (e.g. 2015)
month -- month (e.g. 11)
day -- day (e.g. 30)
Returns
float -- magnetic variation
"""
(latd, latm, lats) = decdeg2dms(lat)
(lond, lonm, lons) = decdeg2dms(lon)
payload = {'latd': latd,'latm':latm,'lats':lats,'lond':lond,'lonm':lonm,
'lons':lons,'elev':elev,'year':year,'month':month,'day':day,'Ein':'D'}
url = 'http://www.ga.gov.au/oracle/cgi/geoAGRF.sh'
# Sleep to avoid spamming server
time.sleep(1)
r = requests.get(url, params=payload)
if r.status_code == 200:
c = r.content
soup = BeautifulSoup(c,'html.parser')
deg_text = soup.find_all('b')[-1].text.strip()
# strip out the junk so we have a number
# Strip spaces before the search
deg_text = deg_text.replace(" ","")
deg = re.search(r'D=(.*?)deg', deg_text).group(1)
deg = float(deg)
return deg
else:
return 'something went wrong'
| 29.392857 | 76 | 0.59113 |
bcf313d0b0b9c2b0b509d979b3d7f59db6845dd5
| 81,110 |
py
|
Python
|
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | null | null | null |
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | 4 |
2020-07-21T12:51:46.000Z
|
2022-01-22T10:29:25.000Z
|
google-cloud-sdk/lib/googlecloudsdk/third_party/apis/datacatalog/v1beta1/datacatalog_v1beta1_messages.py
|
bopopescu/Social-Lite
|
ee05d6a7431c36ff582c8d6b58bb20a8c5f550bf
|
[
"Apache-2.0"
] | 1 |
2020-07-25T18:17:57.000Z
|
2020-07-25T18:17:57.000Z
|
"""Generated message classes for datacatalog version v1beta1.
A fully managed and highly scalable data discovery and metadata management
service.
"""
# NOTE: This file is autogenerated and should not be edited by hand.
from apitools.base.protorpclite import messages as _messages
from apitools.base.py import encoding
package = 'datacatalog'
encoding.AddCustomJsonFieldMapping(
StandardQueryParameters, 'f__xgafv', '$.xgafv')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_1', '1')
encoding.AddCustomJsonEnumMapping(
StandardQueryParameters.FXgafvValueValuesEnum, '_2', '2')
| 42.644585 | 150 | 0.753051 |
bcf316f9ca83440854d986b21a72745912b71d9f
| 656 |
py
|
Python
|
materials/migrations/0072_auto_20190422_1708.py
|
mgovoni-devel/MatD3
|
5b68d147f886bce427f92bb560159e62cec2d4e7
|
[
"BSD-2-Clause-FreeBSD"
] | 7 |
2019-09-14T07:24:09.000Z
|
2021-06-15T16:15:05.000Z
|
materials/migrations/0072_auto_20190422_1708.py
|
mgovoni-devel/MatD3
|
5b68d147f886bce427f92bb560159e62cec2d4e7
|
[
"BSD-2-Clause-FreeBSD"
] | 14 |
2019-12-05T01:49:19.000Z
|
2021-06-23T18:34:51.000Z
|
materials/migrations/0072_auto_20190422_1708.py
|
mgovoni-devel/MatD3
|
5b68d147f886bce427f92bb560159e62cec2d4e7
|
[
"BSD-2-Clause-FreeBSD"
] | 2 |
2019-11-06T21:16:57.000Z
|
2019-11-30T10:51:44.000Z
|
# Generated by Django 2.1.7 on 2019-04-22 21:08
from django.db import migrations
| 22.62069 | 49 | 0.565549 |
bcf492dcec78d6b358e2430eb0bbca995c069560
| 5,054 |
py
|
Python
|
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | 1 |
2018-11-18T05:43:05.000Z
|
2018-11-18T05:43:05.000Z
|
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | null | null | null |
Deep-Learning/Crowd-Count/src/data_preprocess.py
|
sadbb/CVCode
|
c7c8b527af786d8f113122231e6296987b242b59
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# ------------------------
# written by Songjian Chen
# 2018-10
# ------------------------
import os
import skimage.io
from skimage.color import rgb2gray
import skimage.transform
from scipy.io import loadmat
import numpy as np
import cv2
import math
import warnings
import random
import torch
import matplotlib.pyplot as plt
warnings.filterwarnings("ignore")
extract_test_data()
| 38.287879 | 109 | 0.623467 |
bcf55731b896385e43f5d67d9f858074f1791555
| 38 |
py
|
Python
|
for1.py
|
satyamraj123/set-of-python-programs
|
c9a20b37cddc555704799e5ff277488b7eff58a9
|
[
"Apache-2.0"
] | null | null | null |
for1.py
|
satyamraj123/set-of-python-programs
|
c9a20b37cddc555704799e5ff277488b7eff58a9
|
[
"Apache-2.0"
] | null | null | null |
for1.py
|
satyamraj123/set-of-python-programs
|
c9a20b37cddc555704799e5ff277488b7eff58a9
|
[
"Apache-2.0"
] | null | null | null |
fruit='banana'
x=len(fruit)
print(x)
| 12.666667 | 15 | 0.684211 |
bcf5a205651a80f9d34336bef6e346a1933e40ae
| 689 |
py
|
Python
|
Django_Intershala/recruiter/migrations/0004_auto_20210305_1551.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | 7 |
2021-03-08T17:09:39.000Z
|
2021-12-30T09:44:44.000Z
|
Django_Intershala/recruiter/migrations/0004_auto_20210305_1551.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | null | null | null |
Django_Intershala/recruiter/migrations/0004_auto_20210305_1551.py
|
samir321-pixel/Django_Intershala
|
77aaa24a34873dab4c3302727d5f43986a99809e
|
[
"MIT"
] | 2 |
2021-03-03T11:35:05.000Z
|
2021-03-22T17:00:16.000Z
|
# Generated by Django 3.1.7 on 2021-03-05 10:21
from django.db import migrations, models
| 24.607143 | 62 | 0.576197 |
bcf633a886ab43d9c3c7c35185345d3f776c81e3
| 4,899 |
py
|
Python
|
src/promnesia/sources/telegram.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 1,327 |
2019-11-02T20:10:38.000Z
|
2022-03-29T16:58:36.000Z
|
src/promnesia/sources/telegram.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 157 |
2019-09-06T11:16:40.000Z
|
2022-03-27T20:01:52.000Z
|
src/promnesia/sources/telegram.py
|
halhenke/promnesia
|
03f46b7e0740790ef091e6f48d0ac2e6bf05bcb7
|
[
"MIT"
] | 60 |
2020-06-08T22:12:24.000Z
|
2022-03-22T16:57:22.000Z
|
'''
Uses [[https://github.com/fabianonline/telegram_backup#readme][telegram_backup]] database for messages data
'''
from pathlib import Path
from textwrap import dedent
from typing import Optional, Union, TypeVar
from urllib.parse import unquote # TODO mm, make it easier to rememember to use...
from ..common import PathIsh, Visit, get_logger, Loc, extract_urls, from_epoch, Results, echain
# TODO potentially, belongs to my. package
# TODO kython?
T = TypeVar("T")
# TODO move to common?
| 37.684615 | 154 | 0.560931 |
bcf6d0a350e5ace0a39c3f35ff8dbbc6f050f1f4
| 6,407 |
py
|
Python
|
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
shellmacros/istr.py
|
duaneellissd/shellmacros
|
33b5cd1a8794e35a9540f78dca066b8dfc289c97
|
[
"BSD-2-Clause"
] | null | null | null |
'''
Created on Dec 27, 2019
@author: duane
'''
DOLLAR = ord('$')
LBRACE = ord('{')
RBRACE = ord('}')
LPAREN = ord('(')
RPAREN = ord(')')
def test_istr():
check(-1, -1, "")
check(-1, -1, "a")
check(-1, -1, "ab")
check(-1, -1, "abc")
check(-1, -1, "abcd")
check(-1, -1, "abcde")
check(-1, -1, "abcdef")
check(0, 4, "${a}")
check(0, 5, "${ab}")
check(0, 6, "${abc}")
check(0, 7, "${abcd}")
check(1, 5, "a${a}")
check(2, 6, "ab${a}")
check(3, 7, "abc${a}")
check(4, 8, "abcd${a}")
check(5, 9, "abcde${a}")
check(0, 4, "${a}a")
check(0, 4, "${a}ab")
check(0, 4, "${a}abc")
check(0, 4, "${a}abcd")
check(0, 4, "${a}abcde")
dut = check(4, 8, "abcd${a}xyz")
dut.replace(4, 8, "X")
check2(-1, -1, None, dut)
r = str(dut)
print("Got: %s" % r)
assert ("abcdXxyz" == str(dut))
# now nested tests
dut = check(5, 9, "abc${${Y}}xyz")
dut.replace(5, 9, "X")
r = str(dut)
assert (r == "abc${X}xyz")
dut = check2(3, 7, "${X}", dut)
dut.replace(3, 7, "ABC")
s = str(dut)
r = "abcABCxyz"
assert (s == r)
print("Success")
if __name__ == '__main__':
test_istr()
| 27.856522 | 81 | 0.517715 |
bcf76125149120b7d959b455bacb0c98cf4095f0
| 7,712 |
py
|
Python
|
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | 4 |
2019-07-18T18:16:31.000Z
|
2020-02-28T08:39:58.000Z
|
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | null | null | null |
cli.py
|
checktheroads/deenis
|
2581e2fcbb08a9c85590bd54e109f24cc87b664f
|
[
"WTFPL"
] | null | null | null |
#!/usr/bin/env python3
"""
CLI for Accessing Deenis
"""
# Standard Imports
import sys
from pathlib import Path
# Module Imports
import click
# Path Fixes
working_dir = Path(__file__).resolve().parent
sys.path.append(str(working_dir))
# Project Imports
from deenis import Deenis
if __name__ == "__main__":
add_records()
| 36.72381 | 86 | 0.490794 |
bcf7f47be4d0d789e4869009ef9f2f68c5ab3b33
| 5,383 |
py
|
Python
|
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
main_cl.py
|
spiolynn/pybo
|
186495de315eb8ec47a996de959574f9864da7c4
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from bigone import BigOneDog
from common import gen_logger
import logging
import time
import json
def strategy_eth_big_bnc_eth(dog):
"""
BIG/ETH -> BIG/BNC -> ETH/BNC
ETH/BNC -> BIG/BNC -> BIG/ETH
:param dog: implemention of BigOneDog
:return:
"""
big_eth_data = dog.get_order_book('BIG-ETH')
big_bnc_data = dog.get_order_book('BIG-BNC')
eth_bnc_data = dog.get_order_book('ETH-BNC')
print('BIG-ETH')
print('', big_eth_data['asks'][0]['price'], big_eth_data['asks'][0]['amount'])
print('', big_eth_data['bids'][0]['price'], big_eth_data['bids'][0]['amount'])
print('BIG-BNC')
print('', big_bnc_data['asks'][0]['price'], big_bnc_data['asks'][0]['amount'])
print('', big_bnc_data['bids'][0]['price'], big_bnc_data['bids'][0]['amount'])
print('ETH-BNC')
print('', eth_bnc_data['asks'][0]['price'], eth_bnc_data['asks'][0]['amount'])
print('', eth_bnc_data['bids'][0]['price'], eth_bnc_data['bids'][0]['amount'])
# positive transaction
pos_anc = 0.999*0.999*0.999*\
((1 / (float(big_eth_data['asks'][0]['price'])))
* float(big_bnc_data['bids'][0]['price']) )
pos_anc = pos_anc / float(eth_bnc_data['asks'][0]['price']) - 1
# negative transaction
neg_anc = 0.999 * 0.999 * 0.999 * \
(float(eth_bnc_data['bids'][0]['price'])
/ float(big_bnc_data['asks'][0]['price'])
* float(big_eth_data['asks'][0]['price']))
neg_anc = neg_anc / 1 - 1
flag = False
amt = 2.0
if float(big_eth_data['asks'][0]['amount']) >= amt:
if float(big_bnc_data['bids'][0]['amount']) >= amt:
if float(eth_bnc_data['asks'][0]['amount']) >= amt * float(big_eth_data['asks'][0]['price']):
flag = True
msg = "[:BIG/ETH -> BIG/BNC -> ETH/BNC]:"
if pos_anc < 0.01:
result = "1%, 0"
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "1%"
if flag is False:
result = "{},{}".format(result,", 0")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
else:
result = "{},{}".format(result," 1")
logger.info("{0} {1:.2f}%, {2}".format(msg,pos_anc*100,result))
print("{} {} {} {}".format('BIG-ETH','BID', big_eth_data['asks'][0]['price'], str(amt)))
print("{} {} {} {}".format('BIG-BNC','ASK', big_bnc_data['bids'][0]['price'], str(amt)))
print("{} {} {} {}".format('ETH-BNC','BID', eth_bnc_data['asks'][0]['price'],
str(amt * float(big_eth_data['asks'][0]['price']))))
# dog.create_order('BIG-ETH','ASK', big_eth_data['asks'][0]['price'], '2.0')
# dog.create_order('BIG-BNC','BID', big_bnc_data['bids'][0]['price'], '2.0')
# dog.create_order('ETH-BNC','ASK', eth_bnc_data['asks'][0]['price'],
# str(2.0 * float(big_eth_data['asks'][0]['price'])))
return True
if neg_anc < 0.01:
result = "1%, 0"
else:
result = "1%, 1"
logger.info("[:ETH/BNC -> BIG/BNC -> BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100,result))
return False
# return pos_anc, neg_anc
if __name__ == '__main__':
gen_logger('bigonetest')
logger = logging.getLogger("bigone")
with open("PRIVATE_KEY.json",'r') as f:
private_key = json.load(f)["key"]
dog = BigOneDog(private_key)
# strategy_eth_bnc(dog)
# dog.get_orders("ETH-BNC",'10')
# r = dog.get_order("b79ef031-c477-46f9-b452-7e97aa97435d")
# print(r)
# r = dog.get_orders('ETH-BNC','10')
# print(r)
while True:
flag = strategy_eth_big_bnc_eth(dog)
if flag is True:
break
else:
print("10")
print("")
time.sleep(10)
# break
# pos_anc, neg_anc = strategy_eth_bnc(dog)
# if pos_anc < 0.01:
# result = "1%, 0"
# else:
# result = "1%, 1"
#
# logger.info("[:BIG/ETH -> BIG/BNC -> ETH/BNC]: {0:.2f}%, {1}".format(pos_anc*100,result))
#
# if neg_anc < 0.01:
# result = "1%, 0"
# else:
# result = "1%, 1"
#
# logger.info("[:ETH/BNC -> BIG/BNC -> BIG/ETH]: {0:.2f}%, {1}".format(neg_anc*100,result))
#
# print("10")
# print("")
# time.sleep(10)
| 35.886667 | 112 | 0.546907 |
bcf86b7e6462408e17d610983a6cb23985d20fe4
| 1,191 |
py
|
Python
|
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | 6 |
2019-11-15T08:42:29.000Z
|
2021-03-04T11:58:39.000Z
|
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | null | null | null |
run_experiments.py
|
gahaalt/cifar-vs-tensorflow2
|
547d131382438ef76e315dde06a6870737f1fbad
|
[
"MIT"
] | 3 |
2020-11-25T03:44:41.000Z
|
2021-03-08T04:45:56.000Z
|
import os
import yaml
import logging
import importlib
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
logging.getLogger('tensorflow').disabled = True
from cifar_training_tools import cifar_training, cifar_error_test
print('\n' + '#' * 19)
print("TESTING FOR ERRORS!")
print('#' * 19)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
if 'skip_error_test' in exp and exp['skip_error_test']:
continue
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_error_test(model(**exp['model_parameters']))
print("OK!")
print('\n' + '#' * 22)
print("MODEL TRAINING BEGINS!")
print('#' * 22)
stream = open('experiments.yaml', 'r')
for exp in yaml.safe_load_all(stream):
print(); print_dict(exp); print();
model = getattr(importlib.import_module(exp['module']), exp['model'])
cifar_training(model(**exp['model_parameters']), **exp['train_parameters'])
| 27.697674 | 79 | 0.628044 |
bcf8d925c5a3910be4a945e6cce5d1278db5fcb4
| 179 |
py
|
Python
|
json2yaml.py
|
cristicalin/tools
|
b8fe4efb1143a575d102d3a8e368052a4ecdceae
|
[
"MIT"
] | null | null | null |
json2yaml.py
|
cristicalin/tools
|
b8fe4efb1143a575d102d3a8e368052a4ecdceae
|
[
"MIT"
] | 1 |
2016-01-31T12:54:06.000Z
|
2016-02-29T13:45:46.000Z
|
json2yaml.py
|
cristicalin/tools
|
b8fe4efb1143a575d102d3a8e368052a4ecdceae
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import yaml
import json
if __name__ == '__main__':
content = json.load(sys.stdin)
print yaml.dump(content, indent=2, default_flow_style=False)
| 16.272727 | 62 | 0.73743 |
bcf9bd066aefdc4f6abca126693e2677662eb927
| 1,542 |
py
|
Python
|
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 1 |
2018-07-02T13:54:49.000Z
|
2018-07-02T13:54:49.000Z
|
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | null | null | null |
histdata/mt5db/script_DownloadAndStoreToMongodb.py
|
UpSea/midProjects
|
ed6086e74f68b1b89f725abe0b270e67cf8993a8
|
[
"MIT"
] | 3 |
2016-05-28T15:13:02.000Z
|
2021-04-10T06:04:25.000Z
|
# -*- coding: utf-8 -*-
import os,sys
from PyQt4 import QtGui,QtCore
dataRoot = os.path.abspath(os.path.join(os.path.dirname(__file__),os.pardir,os.pardir,'histdata'))
sys.path.append(dataRoot)
import dataCenter as dataCenter
from data.mongodb.DataSourceMongodb import Mongodb
import datetime as dt
if __name__ == '__main__':
#app = QtGui.QApplication(sys.argv)
#mid-----------------------------------------------------------------------------------------------------------------------------
subMain()
#mid-----------------------------------------------------------------------------------------------------------------------------
#sys.exit(app.exec_())
| 37.609756 | 133 | 0.527237 |
bcf9e42ce187d88ea3d733bded3e343188bcd463
| 10,196 |
py
|
Python
|
daproli/transformer.py
|
ermshaua/daproli
|
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
|
[
"BSD-3-Clause"
] | null | null | null |
daproli/transformer.py
|
ermshaua/daproli
|
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
|
[
"BSD-3-Clause"
] | null | null | null |
daproli/transformer.py
|
ermshaua/daproli
|
c1f7aeec431d9c60ae06eeac23455c1a03bc82cf
|
[
"BSD-3-Clause"
] | null | null | null |
from joblib import Parallel, delayed
from tqdm import tqdm
from .processing import map, filter, split, expand, combine, join
from .manipulation import windowed, flatten
| 36.028269 | 127 | 0.640643 |
bcfa0b019701139c1bd20ee0f0d8361e7deda90e
| 90 |
py
|
Python
|
Ad-Hoc/2454.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
Ad-Hoc/2454.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
Ad-Hoc/2454.py
|
LorranSutter/URI-Online-Judge
|
aef885b9a7caa83484cf172e29eea8ec92fc3627
|
[
"MIT"
] | null | null | null |
P, R = input().split()
if P == '0': print('C')
elif R == '0': print('B')
else: print('A')
| 18 | 25 | 0.488889 |
bcfa7e8108972dea4c27619df4c1be7b06458b6e
| 3,813 |
py
|
Python
|
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | 2 |
2020-02-18T09:41:43.000Z
|
2020-02-20T11:03:03.000Z
|
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | null | null | null |
main.py
|
brunotoshio/castella
|
ad418bd1beb4953687a4ad7be586b12631c25992
|
[
"MIT"
] | null | null | null |
import pymongo
import yaml
import sched
import time
import json
from castella import TweetCrawler
if __name__ == "__main__":
searcher = Castella()
searcher.execute_search()
| 37.019417 | 150 | 0.540782 |
bcfabdd28c428dd3bd0fa4eb4f234286130b7db0
| 1,275 |
py
|
Python
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
monroid/openvino
|
8272b3857ef5be0aaa8abbf7bd0d5d5615dc40b6
|
[
"Apache-2.0"
] | 2,406 |
2020-04-22T15:47:54.000Z
|
2022-03-31T10:27:37.000Z
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 4,948 |
2020-04-22T15:12:39.000Z
|
2022-03-31T18:45:42.000Z
|
ngraph/test/frontend/paddlepaddle/test_models/gen_scripts/generate_slice.py
|
thomas-yanxin/openvino
|
031e998a15ec738c64cc2379d7f30fb73087c272
|
[
"Apache-2.0"
] | 991 |
2020-04-23T18:21:09.000Z
|
2022-03-31T18:40:57.000Z
|
#
# slice paddle model generator
#
import numpy as np
from save_model import saveModel
import paddle as pdpd
import sys
data_type = 'float32'
if __name__ == "__main__":
main()
| 32.692308 | 116 | 0.622745 |
bcfb7330e40f9b79f2ab184f143d401951828548
| 2,513 |
py
|
Python
|
tacker/sol_refactored/common/vnf_instance_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 116 |
2015-10-18T02:57:08.000Z
|
2022-03-15T04:09:18.000Z
|
tacker/sol_refactored/common/vnf_instance_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 6 |
2016-11-07T22:15:54.000Z
|
2021-05-09T06:13:08.000Z
|
tacker/sol_refactored/common/vnf_instance_utils.py
|
h1r0mu/tacker
|
8c69dda51fcfe215c4878a86b82018d2b96e5561
|
[
"Apache-2.0"
] | 166 |
2015-10-20T15:31:52.000Z
|
2021-11-12T08:39:49.000Z
|
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__) # not used at the moment
# see IETF RFC 7396
| 32.217949 | 78 | 0.68842 |
bcfca1189da0e63d3e685ea19031e90196e49d8d
| 719 |
py
|
Python
|
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
testfixtures/compat.py
|
cjw296/testfixtures
|
1bf1e6fe1e111210d6d7fbcd00feb564095ffd02
|
[
"MIT"
] | null | null | null |
# compatibility module for different python versions
import sys
if sys.version_info[:2] > (3, 0):
PY2 = False
PY3 = True
Bytes = bytes
Unicode = str
basestring = str
class_type_name = 'class'
ClassType = type
exception_module = 'builtins'
new_class = type
self_name = '__self__'
from io import StringIO
xrange = range
else:
PY2 = True
PY3 = False
Bytes = str
Unicode = unicode
basestring = basestring
class_type_name = 'type'
from types import ClassType
exception_module = 'exceptions'
from new import classobj as new_class
self_name = 'im_self'
from cStringIO import StringIO
xrange = xrange
| 18.435897 | 52 | 0.635605 |
bcfd55447233f3a98240a98d95e5f9301c8b38ec
| 3,898 |
py
|
Python
|
old_py2/tests/models_tests/notifications/test_match_score.py
|
ofekashery/the-blue-alliance
|
df0e47d054161fe742ac6198a6684247d0713279
|
[
"MIT"
] | 266 |
2015-01-04T00:10:48.000Z
|
2022-03-28T18:42:05.000Z
|
tests/models_tests/notifications/test_match_score.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 2,673 |
2015-01-01T20:14:33.000Z
|
2022-03-31T18:17:16.000Z
|
tests/models_tests/notifications/test_match_score.py
|
gregmarra/the-blue-alliance
|
5bedaf5c80b4623984760d3da3289640639112f9
|
[
"MIT"
] | 230 |
2015-01-04T00:10:48.000Z
|
2022-03-26T18:12:04.000Z
|
import re
import unittest2
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from consts.notification_type import NotificationType
from helpers.event.event_test_creator import EventTestCreator
from models.team import Team
from models.notifications.match_score import MatchScoreNotification
| 41.913978 | 94 | 0.69882 |
bcfdc746f65446c3a274947a1481597ff88c7469
| 5,406 |
py
|
Python
|
util/submission/templates.py
|
jeanlucf22/mgmol
|
4e79bc32c14c8a47ae18ad0659ea740719c8b77f
|
[
"BSD-3-Clause-LBNL",
"FSFAP"
] | 25 |
2018-12-29T03:33:01.000Z
|
2021-05-08T12:52:27.000Z
|
util/submission/templates.py
|
jeanlucf22/mgmol
|
4e79bc32c14c8a47ae18ad0659ea740719c8b77f
|
[
"BSD-3-Clause-LBNL",
"FSFAP"
] | 121 |
2018-12-19T02:38:21.000Z
|
2021-12-20T16:29:24.000Z
|
util/submission/templates.py
|
jeanlucf22/mgmol
|
4e79bc32c14c8a47ae18ad0659ea740719c8b77f
|
[
"BSD-3-Clause-LBNL",
"FSFAP"
] | 15 |
2019-02-17T05:28:43.000Z
|
2022-02-28T05:24:11.000Z
|
md_template_d144 = """verbosity=0
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
reset=@
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=MD
[MD]
type=@
num_steps=@
dt=@15.
[XLBOMD]
dissipation=@5
align=@
[Quench]
max_steps=@5
max_steps_tight=@
atol=1.e-@10
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
overallocate_factor=@2.
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
auxiliary_radius=@
[email protected]
[Restart]
input_filename=wave.out
input_level=3
interval=@
"""
md_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Poisson]
solver=@
max_steps=@
[Run]
type=MD
[Quench]
max_steps=1000
atol=1.e-@
[MD]
type=@
num_steps=@
dt=10.
print_interval=5
[XLBOMD]
dissipation=@
align=@
[Restart]
input_filename=wave.out
input_level=4
output_level=4
interval=@
"""
quench_template_H2O_64 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=128
ny=128
nz=128
[Domain]
ox=0.
oy=0.
oz=0.
lx=23.4884
ly=23.4884
lz=23.4884
[Potentials]
pseudopotential=pseudo.O_ONCV_PBE_SG15
pseudopotential=pseudo.D_ONCV_PBE_SG15
[Run]
type=QUENCH
[Quench]
max_steps=1000
atol=1.e-8
[Orbitals]
initial_type=Fourier
[Restart]
output_level=4
"""
quench_template_d144 = """verbosity=1
xcFunctional=PBE
FDtype=4th
[Mesh]
nx=160
ny=80
nz=80
[Domain]
ox=0.
oy=0.
oz=0.
lx=42.4813
ly=21.2406
lz=21.2406
[Potentials]
pseudopotential=pseudo.D_tm_pbe
[Poisson]
solver=@
max_steps_initial=@50
max_steps=@50
bcx=periodic
bcy=periodic
bcz=periodic
[Run]
type=QUENCH
[Quench]
max_steps=200
atol=1.e-7
num_lin_iterations=3
ortho_freq=100
[SpreadPenalty]
type=@energy
damping=@
[email protected]
[email protected]
[Orbitals]
initial_type=Gaussian
initial_width=1.5
[ProjectedMatrices]
solver=@short_sighted
[LocalizationRegions]
radius=@8.
[Restart]
output_type=distributed
"""
H2O_64_params={
'nodes': '32',
'ntasks': '256',
'omp_num_threads': 8 if omp_num_threads == 4 else omp_num_threads,
'cores_per_task': '2',
'potentials': 'ln -s $maindir/potentials/pseudo.O_ONCV_PBE_SG15\nln -s $maindir/potentials/pseudo.D_ONCV_PBE_SG15',
'lrs': '',
'jobname': 'H2O_64',
}
d144_params={
'nodes': '8',
'walltime': '01:30:00',
'ntasks': '125',
'omp_num_threads': omp_num_threads,
'cores_per_task': '1',
'potentials': 'ln -s $maindir/potentials/pseudo.D_tm_pbe',
'lrs': '-l lrs.in',
'jobname': 'd144',
}
vulcan_params={
'queue': 'psmall',
'scratch_path': '/p/lscratchv/mgmolu/dunn27/mgmol/',
'gres': 'lscratchv',
'exe': 'mgmol-bgq',
}
cab_params={
'queue': 'pbatch',
'scratch_path': '/p/lscratchd/dunn27/mgmol/',
'gres': 'lscratchd',
'omp_num_threads': '1',
'exe': 'mgmol-pel',
'walltime': '01:30:00',
}
runfile_quench_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_quench = mgmol_quench.cfg
cp $datadir/$cfg_quench .
cp $datadir/coords.in .
cp $datadir/lrs.in .
{potentials}
#1st run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_quench -i coords.in {lrs}
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
runfile_md_template="""#!/bin/tcsh
#MSUB -l nodes={nodes},walltime={walltime}
#MSUB -o mgmol.out
#MSUB -q {queue}
#MSUB -A comp
#MSUB -l gres={gres}
#MSUB -N {jobname}
rm -f queued
echo ' ' > running
use boost-nompi-1.55.0
export BOOST_ROOT=/usr/local/tools/boost-nompi-1.55.0
export Boost_NO_SYSTEM_PATHS=ON
setenv OMP_NUM_THREADS {omp_num_threads}
set ntasks = {ntasks}
set maindir = $home/mgmol
set exe = $maindir/bin/{exe}
set datadir = `pwd`
set scratchdir = {scratch_path}`basename $datadir`
mkdir $scratchdir
cd $scratchdir
echo ' ' > running
set cfg_md = mgmol_md.cfg
cp $datadir/$cfg_md .
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
#MD run
srun -n $ntasks -c {cores_per_task} $exe -c $cfg_md
#restart
rm -f wave.out
set restart_file=`ls -ld * | awk '/snapshot0/ {{ print $9 }}' | tail -n1`
ln -s -f $restart_file wave.out
rm -f running
echo ' ' > queued
"""
| 17.216561 | 119 | 0.663707 |
bcfde8681fdc58448a7018049cb36bbab73499b0
| 21,700 |
py
|
Python
|
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
Compliant_control/Force Tracking/archive/VIC_Huang1992_(main 09.03).py
|
martihmy/Compliant_control
|
485f627fa83d59f414f41bd57c5d37528ef5f1ec
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import copy
from copy import deepcopy
import rospy
import threading
import quaternion
import numpy as np
from geometry_msgs.msg import Point
from visualization_msgs.msg import *
from franka_interface import ArmInterface
from panda_robot import PandaArm
import matplotlib.pyplot as plt
from scipy.spatial.transform import Rotation
np.set_printoptions(precision=2)
"""
This is a FORCE-BASED VARIABLE IMPEDANCE CONTROLLER based on [Huang1992: Compliant Motion Control of Robots by Using Variable Impedance]
To achieve force tracking, the apparent stiffness (K) and damping (B) is dynamically adjusted through functions dependent on the error in position, velocity and force
About the code/controller:
1] Only stiffness and damping in the 'z'-direction is adaptive, the rest are static
2] Due to the faulted joint velocities (read from rostopics), the more noisy,
numerically derived derivatives of the joint position are prefered to be
used in the controller { get_x_dot(..., numerically = True) }
3] You can now choose between perform_torque_Huang1992() and perform_torque_DeSchutter()
- DeSchutter's control-law offers geometrically consitent stiffness and is more computationally expensive
4] The default desired motion- and force-trajectories are now made in a time-consistent matter, so that the PUBLISH RATE can be altered without messing up the desired behaviour.
The number of iterations is calculated as a function of the controller's control-cycle, T: (max_num_it = duration(=15 s) / T)
"""
# --------- Constants -----------------------------
#print(robot.joint_ordered_angles()) #Read the robot's joint-angles
#new_start = {'panda_joint1': 1.938963389436404, 'panda_joint2': 0.6757504724282993, 'panda_joint3': -0.43399745125475564, 'panda_joint4': -2.0375275954865573, 'panda_joint5': -0.05233040021194351, 'panda_joint6': 3.133254153457202, 'panda_joint7': 1.283328743909796}
# Stiffness
Kp = 30
Kpz = 30 #initial value (adaptive)
Ko = 900
K = np.array([[Kp, 0, 0, 0, 0, 0],
[0, Kp, 0, 0, 0, 0],
[0, 0, Kpz, 0, 0, 0],
[0, 0, 0, Ko, 0, 0],
[0, 0, 0, 0, Ko, 0],
[0, 0, 0, 0, 0, Ko]])
# Damping
Bp = Kp/7
Bpz = Bp # #initial value (adaptive)
Bo = 50
B = np.array([[Bp, 0, 0, 0, 0, 0],
[0, Bp, 0, 0, 0, 0],
[0, 0, Bpz, 0, 0, 0],
[0, 0, 0, Bo, 0, 0],
[0, 0, 0, 0, Bo, 0],
[0, 0, 0, 0, 0, Bo]])
# Apparent inertia
Mp = 10
Mo = 10
M_diag = np.array([Mp,Mp,Mp,Mo,Mo,Mo])
M = np.diagflat(M_diag)
# Constant matrices appearing in equation (50) of [Huang1992]
K_v = np.identity(6)
P = np.identity(6)
gamma = np.identity(18)
#gamma_M = 12
gamma_B = 0.001 #2 # The damping's rate of adaptivity (high value = slow changes)
gamma_K = 0.0005 #1 # The stiffness' rate of adaptivity (high value = slow changes)
#gamma[2,2] = gamma_M
gamma[8,8] = gamma_B
gamma[14,14] = gamma_K
duration = 15 #seconds SHOULD NOT BE ALTERED
"""Functions for generating desired MOTION trajectories"""
#1 Generate a desired trajectory for the manipulator to follow
#2 Generate a desired trajectory for the manipulator to follow
#3 Generate a (time-consistent) desired motion trajectory
"""Functions for generating desired FORCE trajectories"""
#1 Generate a desired force trajectory
#2 Generate an efficient desired force trajectory
#3 Generate a (time-consistent) desired force trajectory
# ------------ Helper functions --------------------------------
# Calculate the numerical derivative of a each row in a vector
# Saturation-function
# Return the cartesian (task-space) inertia of the manipulator [alternatively the inverse of it]
# Return the external forces (everything except for z-force is set to 0 due to offsets)
# Return the position and (relative) orientation
# Return the linear and angular velocities
# Numerically = True -> return the derivarive of the state-vector
# Numerically = False -> read values from rostopic (faulty in sim when interacting with the environment)
# Return the error in position and orientation
# Return the error in linear and angular velocities
# Return the error in linear and angular acceleration
# Return the cartesian (task-space) position
# Compute difference between quaternions and return Euler angle in radians as difference
# -------------- Main functions --------------------
# Get xi as it is described in equation (44) in [Huang1992]
# Calculate lambda_dot as in equation (50) in [Huang1992]
# Return the updated (adapted) Inertia, Damping and Stiffness matrices.
# Calculate and perform the torque as in equation (10) in [Huang1992]
"""
TESTING AREA (Functions needed to run an adaptive version of DeSchutter's impedance controller)
[with geometrically consistent stiffness]
"""
"""
TESTING AREA
"""
# -------------- Plotting ------------------------
if __name__ == "__main__":
# ---------- Initialization -------------------
rospy.init_node("impedance_control")
robot = PandaArm()
publish_rate = 250
rate = rospy.Rate(publish_rate)
T = 0.001*(1000/publish_rate)
max_num_it = int(duration /T)
#robot.move_to_joint_positions(new_start)
robot.move_to_neutral()
# List used to contain data needed for calculation of the torque output
lam = np.zeros(18)
v_history = np.zeros((6,max_num_it))
# Lists providing data for plotting
p_history = np.zeros((3,max_num_it))
v_history_num = np.zeros((6,max_num_it))
x_history = np.zeros((6,max_num_it))
delta_x_history = np.zeros((6,max_num_it))
F_ext_history = np.zeros((6,max_num_it))
z_dynamics_history = np.zeros((3,max_num_it))
# Specify the desired behaviour of the robot
x_d_ddot, x_d_dot, p_d = generate_desired_trajectory_tc(max_num_it,T,move_in_x = True)
goal_ori = np.asarray(robot.endpoint_pose()['orientation']) # goal orientation = current (initial) orientation [remains the same the entire duration of the run]
Rot_d = robot.endpoint_pose()['orientation_R'] # used by the DeSchutter implementation
F_d = generate_F_d_tc(max_num_it,T)
# ----------- The control loop -----------
for i in range(max_num_it):
# update state-lists
p_history[:,i] = get_p()
x_history[:,i] = get_x(goal_ori)
delta_x_history[:,i] = get_delta_x(goal_ori,p_d[:,i])
F_ext_history[:,i] = get_F_ext()
x_dot = get_x_dot(x_history,i,T, numerically=False) #chose 'numerically' either 'True' or 'False'
v_history_num[:,i] = get_x_dot(x_history,i,T, numerically=True) # only for plotting
v_history[:,i] = get_x_dot(x_history,i,T) # for calculating error in acceleration
# adapt M,B and K
xi = get_xi(goal_ori, p_d[:,i],x_dot, x_d_dot[:,i], x_d_ddot[:,i], v_history, i, T)
lam = lam.reshape([18,1]) + get_lambda_dot(gamma,xi,K_v,P,F_d[:,i]).reshape([18,1])*T
M_hat,B_hat,K_hat = update_MBK_hat(lam,M,B,K)
# Apply the resulting torque to the robot
"""CHOOSE ONE OF THE TWO CONTROLLERS BELOW"""
perform_torque_Huang1992(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], goal_ori)
#perform_torque_DeSchutter(M_hat, B_hat, K_hat, x_d_ddot[:,i], x_d_dot[:,i],x_dot, p_d[:,i], Rot_d)
rate.sleep()
# plotting and printing
z_dynamics_history[0][i]=M_hat[2][2]
z_dynamics_history[1][i]=B_hat[2][2]
z_dynamics_history[2][i]=K_hat[2][2]
# Live printing to screen when the controller is running
if i%100 == 0:
print(i,'/',max_num_it,' = ',T*i,' [s] ) Force in z: ',F_ext_history[2,i])
print(K_hat[2][2])
print('')
#Uncomment the block below to save plotting-data
"""
np.save('VIC_p_d.npy',p_d)
np.save('VIC_p.npy',p_history)
np.save('VIC_Fz_d.npy',F_d)
np.save('VIC_Fz.npy',F_ext_history[2])
np.save('VIC_delta_x.npy',delta_x_history) #orientation error in radians
np.save('VIC_adaptive_gains.npy',z_dynamics_history)
"""
plot_result(v_history_num,v_history, p_history, p_d, delta_x_history, F_ext_history, F_d, z_dynamics_history,M,B,K, T)
| 36.842105 | 267 | 0.638157 |
bcfe9ac7b39f229df7d1b4504478244ea3835c1b
| 483 |
py
|
Python
|
tests/migrations/0010_modeltest_datetime_field1.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 2 |
2020-03-17T00:53:23.000Z
|
2020-07-16T07:00:33.000Z
|
tests/migrations/0010_modeltest_datetime_field1.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 76 |
2019-12-05T01:15:57.000Z
|
2021-09-07T16:47:27.000Z
|
tests/migrations/0010_modeltest_datetime_field1.py
|
intellineers/django-bridger
|
ed097984a99df7da40a4d01bd00c56e3c6083056
|
[
"BSD-3-Clause"
] | 1 |
2020-02-05T15:09:47.000Z
|
2020-02-05T15:09:47.000Z
|
# Generated by Django 2.2.9 on 2020-01-28 14:50
import django.utils.timezone
from django.db import migrations, models
| 23 | 74 | 0.63354 |
4c016da0c81742ef879e9615198cd22dc666a5c6
| 6,998 |
py
|
Python
|
pycmap/common.py
|
mdashkezari/pycmap
|
5b526404d005ec220ab0911cd2f3c05263f9eda3
|
[
"MIT"
] | 4 |
2019-09-23T17:12:42.000Z
|
2022-02-01T02:38:40.000Z
|
pycmap/common.py
|
mdashkezari/pycmap
|
5b526404d005ec220ab0911cd2f3c05263f9eda3
|
[
"MIT"
] | 2 |
2019-09-20T12:56:21.000Z
|
2019-09-24T23:08:26.000Z
|
pycmap/common.py
|
mdashkezari/pycmap
|
5b526404d005ec220ab0911cd2f3c05263f9eda3
|
[
"MIT"
] | 1 |
2019-12-18T20:47:20.000Z
|
2019-12-18T20:47:20.000Z
|
"""
Author: Mohammad Dehghani Ashkezari <[email protected]>
Date: 2019-06-28
Function: Host a collection of shared multi-purpose helper functions.
"""
import os
import sys
from tqdm import tqdm
from colorama import Fore, Back, Style, init
import numpy as np
import pandas as pd
import webbrowser
import IPython
MAX_ROWS = 2000000
MAX_SAMPLE_SOURCE = 500000
def halt(msg):
"""Prints an error message and terminates the program."""
msg = '\n' + msg
init(convert=True)
print(Fore.RED + msg, file=sys.stderr)
print(Style.RESET_ALL, end='')
sys.exit(1)
return
def print_tqdm(msg, err=False):
"""Print helper function compatible with tqdmm progressbar."""
# init()
msg = '\n' + msg
if err:
tqdm.write(Fore.RED + msg)
else:
tqdm.write(msg)
tqdm.write(Style.RESET_ALL, end='')
return
def get_base_url():
"""Returns API root endpoint."""
return os.environ.get(
'CMAP_API_BASE_URL', 'https://simonscmap.com').rstrip('/')
def jupytered():
"""Returns True if jupyter notebook has invoked the package."""
jup = False
import __main__ as main
if not hasattr(main, '__file__'):
jup = True
return jup
def inline():
"""
Checks if the package results should get prepared for an "inline" context.
Currently, just calls the jupytered function.
"""
return jupytered()
def make_filename_by_table_var(table, variable, prefix=''):
"""Generate a filename (without extention) using table and variable names."""
if prefix != '': prefix += '_'
return prefix + variable + '_' + table
def canvas_rect(dw, dh):
"""Resizes a canvas dimensions so that it better fits on client browser."""
ar = dw / dh
h = 400 if ar > 3 else 500
w_min = 300
w_max = 1000
w = int(ar * h)
if w > w_max: w = w_max
if w < w_min: w = w_min
return w, h
def get_data_limits(data, quant=0.05):
"""Returns low and high quantile limits of a numeric array."""
data = np.array(data).flatten()
return np.nanquantile(data, quant), np.nanquantile(data, 1-quant)
# def get_token(token=None):
# token = token or os.environ.get('CMAP_API_KEY')
# if token in [None, '']:
# halt('API Key must be specified to access CMAP API')
# return token
def config_path():
"""Returns the path to the config spreadsheet file."""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'config.csv')
def initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir):
"""Creates a .csv file hosting the primary project configs """
if vizEngine is None: vizEngine = 'plotly'
if exportDir is None: exportDir = './export/'
if exportFormat is None: exportFormat = '.csv'
if figureDir is None: figureDir = './figure/'
config = {
'token': [token],
'vizEngine': [vizEngine],
'exportDir': [exportDir],
'exportFormat': [exportFormat],
'figureDir': [figureDir]
}
pd.DataFrame(config).to_csv(config_path(), index=False)
return
def remove_angle_brackets(token):
"""Removes angle brackets at start and end of the token, if exist."""
if token is not None:
if token[0] == '<': token = token[1:]
if token[-1] == '>': token = token[:-1]
return token
def save_config(token=None, vizEngine=None, exportDir=None, exportFormat=None, figureDir=None):
"""Updates the project's configs at the config spreadsheet."""
configPath = config_path()
if not os.path.isfile(configPath):
initiate_config_file(token, vizEngine, exportDir, exportFormat, figureDir)
df = pd.read_csv(configPath)
if token is not None:
df['token'] = remove_angle_brackets(token)
if vizEngine is not None:
supportedVizEngines = ['bokeh', 'plotly']
if vizEngine not in supportedVizEngines:
halt('%s is not a supported visualization library' % vizEngine)
df['vizEngine'] = vizEngine
if exportDir is not None:
df['exportDir'] = exportDir
if exportFormat is not None:
df['exportFormat'] = exportFormat
if figureDir is not None:
df['figureDir'] = figureDir
df.to_csv(configPath, index=False)
return
def load_config():
"""Loads the config spreadsheet and returns it as a dataframe."""
configPath = config_path()
if not os.path.isfile(configPath):
msg = '\nAPI key not found!\n'
msg = msg + 'Please pass the API key using the following code:\n'
msg = msg + 'import pycmap\n'
msg = msg + 'pycmap.API(<api_key>)\n'
halt(msg)
return pd.read_csv(configPath)
def get_token():
"""Returns the API key."""
return remove_angle_brackets(load_config()['token'][0])
def get_vizEngine():
"""Returns the visualization library name."""
return load_config()['vizEngine'][0]
def get_export_dir():
"""Returns the path to the export directory."""
return load_config()['exportDir'][0]
def get_export_format():
"""Returns the file format of the exported files."""
return load_config()['exportFormat'][0]
def get_figure_dir():
"""Returns the path to the figure directory."""
return load_config()['figureDir'][0]
def get_bokeh_tools():
"""Returns a list tools used along with a bokeh graph."""
return 'crosshair,pan,zoom_in,wheel_zoom,zoom_out,box_zoom,reset,save'
def normalize(vals, min_max=False):
"""Takes an array and either normalize to min/max, standardize it (remove the mean and divide by standard deviation)."""
if min_max:
normalized_vals=(vals-np.nanmin(vals))/(np.nanmax(vals)-np.nanmin(vals))
else:
normalized_vals=(vals-np.nanmean(vals))/np.nanstd(vals)
return normalized_vals
def open_HTML(path):
"""Display HTML file by defaut browser or inline in case jupyter is the caller."""
if jupytered():
vObj = IPython.display.IFrame(path, width=800, height=400)
IPython.display.display(vObj)
else:
path = 'file://' + os.path.realpath(path)
webbrowser.open(path, new=2)
return
| 33.32381 | 131 | 0.572021 |
4c02744bf60501dd6a24f21c67c4971193de0e1a
| 26,204 |
py
|
Python
|
appengine/monorail/api/v3/api_proto/projects_pb2.py
|
fknittel/git-retry-build
|
4d57dd6e8b7567daeb24b55f66bc5becd3d459f3
|
[
"BSD-3-Clause"
] | null | null | null |
appengine/monorail/api/v3/api_proto/projects_pb2.py
|
fknittel/git-retry-build
|
4d57dd6e8b7567daeb24b55f66bc5becd3d459f3
|
[
"BSD-3-Clause"
] | 4 |
2022-03-17T18:58:21.000Z
|
2022-03-17T18:58:22.000Z
|
appengine/monorail/api/v3/api_proto/projects_pb2.py
|
fknittel/git-retry-build
|
4d57dd6e8b7567daeb24b55f66bc5becd3d459f3
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: api/v3/api_proto/projects.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from api.v3.api_proto import project_objects_pb2 as api_dot_v3_dot_api__proto_dot_project__objects__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='api/v3/api_proto/projects.proto',
package='monorail.v3',
syntax='proto3',
serialized_options=b'Z!infra/monorailv2/api/v3/api_proto',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1f\x61pi/v3/api_proto/projects.proto\x12\x0bmonorail.v3\x1a\x1bgoogle/protobuf/empty.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a&api/v3/api_proto/project_objects.proto\"t\n\x15\x43reateFieldDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12,\n\x08\x66ielddef\x18\x02 \x01(\x0b\x32\x15.monorail.v3.FieldDefB\x03\xe0\x41\x02\"J\n\x16GetComponentDefRequest\x12\x30\n\x04name\x18\x01 \x01(\tB\"\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\xe0\x41\x02\"\x81\x01\n\x19\x43reateComponentDefRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xe0\x41\x02\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\x12\x35\n\rcomponent_def\x18\x02 \x01(\x0b\x32\x19.monorail.v3.ComponentDefB\x03\xe0\x41\x02\"M\n\x19\x44\x65leteComponentDefRequest\x12\x30\n\x04name\x18\x01 \x01(\tB\"\xe0\x41\x02\xfa\x41\x1c\n\x1a\x61pi.crbug.com/ComponentDef\"q\n\x19ListIssueTemplatesRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"d\n\x1aListIssueTemplatesResponse\x12-\n\ttemplates\x18\x01 \x03(\x0b\x32\x1a.monorail.v3.IssueTemplate\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"p\n\x18ListComponentDefsRequest\x12-\n\x06parent\x18\x01 \x01(\tB\x1d\xfa\x41\x17\n\x15\x61pi.crbug.com/Project\xe0\x41\x02\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"g\n\x19ListComponentDefsResponse\x12\x31\n\x0e\x63omponent_defs\x18\x01 \x03(\x0b\x32\x19.monorail.v3.ComponentDef\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\"<\n\x13ListProjectsRequest\x12\x11\n\tpage_size\x18\x01 \x01(\x05\x12\x12\n\npage_token\x18\x02 \x01(\t\"W\n\x14ListProjectsResponse\x12&\n\x08projects\x18\x01 \x03(\x0b\x32\x14.monorail.v3.Project\x12\x17\n\x0fnext_page_token\x18\x02 
\x01(\t2\x87\x05\n\x08Projects\x12M\n\x0e\x43reateFieldDef\x12\".monorail.v3.CreateFieldDefRequest\x1a\x15.monorail.v3.FieldDef\"\x00\x12S\n\x0fGetComponentDef\x12#.monorail.v3.GetComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12Y\n\x12\x43reateComponentDef\x12&.monorail.v3.CreateComponentDefRequest\x1a\x19.monorail.v3.ComponentDef\"\x00\x12V\n\x12\x44\x65leteComponentDef\x12&.monorail.v3.DeleteComponentDefRequest\x1a\x16.google.protobuf.Empty\"\x00\x12g\n\x12ListIssueTemplates\x12&.monorail.v3.ListIssueTemplatesRequest\x1a\'.monorail.v3.ListIssueTemplatesResponse\"\x00\x12\x64\n\x11ListComponentDefs\x12%.monorail.v3.ListComponentDefsRequest\x1a&.monorail.v3.ListComponentDefsResponse\"\x00\x12U\n\x0cListProjects\x12 .monorail.v3.ListProjectsRequest\x1a!.monorail.v3.ListProjectsResponse\"\x00\x42#Z!infra/monorailv2/api/v3/api_protob\x06proto3'
,
dependencies=[google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,api_dot_v3_dot_api__proto_dot_project__objects__pb2.DESCRIPTOR,])
_CREATEFIELDDEFREQUEST = _descriptor.Descriptor(
name='CreateFieldDefRequest',
full_name='monorail.v3.CreateFieldDefRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='monorail.v3.CreateFieldDefRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='fielddef', full_name='monorail.v3.CreateFieldDefRequest.fielddef', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=177,
serialized_end=293,
)
_GETCOMPONENTDEFREQUEST = _descriptor.Descriptor(
name='GetComponentDefRequest',
full_name='monorail.v3.GetComponentDefRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='monorail.v3.GetComponentDefRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372A\034\n\032api.crbug.com/ComponentDef\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=295,
serialized_end=369,
)
_CREATECOMPONENTDEFREQUEST = _descriptor.Descriptor(
name='CreateComponentDefRequest',
full_name='monorail.v3.CreateComponentDefRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='monorail.v3.CreateComponentDefRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A\027\n\025api.crbug.com/Project', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='component_def', full_name='monorail.v3.CreateComponentDefRequest.component_def', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=372,
serialized_end=501,
)
_DELETECOMPONENTDEFREQUEST = _descriptor.Descriptor(
name='DeleteComponentDefRequest',
full_name='monorail.v3.DeleteComponentDefRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='monorail.v3.DeleteComponentDefRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\340A\002\372A\034\n\032api.crbug.com/ComponentDef', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=503,
serialized_end=580,
)
_LISTISSUETEMPLATESREQUEST = _descriptor.Descriptor(
name='ListIssueTemplatesRequest',
full_name='monorail.v3.ListIssueTemplatesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='monorail.v3.ListIssueTemplatesRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='monorail.v3.ListIssueTemplatesRequest.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='monorail.v3.ListIssueTemplatesRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=582,
serialized_end=695,
)
_LISTISSUETEMPLATESRESPONSE = _descriptor.Descriptor(
name='ListIssueTemplatesResponse',
full_name='monorail.v3.ListIssueTemplatesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='templates', full_name='monorail.v3.ListIssueTemplatesResponse.templates', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='monorail.v3.ListIssueTemplatesResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=697,
serialized_end=797,
)
_LISTCOMPONENTDEFSREQUEST = _descriptor.Descriptor(
name='ListComponentDefsRequest',
full_name='monorail.v3.ListComponentDefsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='monorail.v3.ListComponentDefsRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372A\027\n\025api.crbug.com/Project\340A\002', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_size', full_name='monorail.v3.ListComponentDefsRequest.page_size', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='monorail.v3.ListComponentDefsRequest.page_token', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=799,
serialized_end=911,
)
_LISTCOMPONENTDEFSRESPONSE = _descriptor.Descriptor(
name='ListComponentDefsResponse',
full_name='monorail.v3.ListComponentDefsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='component_defs', full_name='monorail.v3.ListComponentDefsResponse.component_defs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='monorail.v3.ListComponentDefsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=913,
serialized_end=1016,
)
_LISTPROJECTSREQUEST = _descriptor.Descriptor(
name='ListProjectsRequest',
full_name='monorail.v3.ListProjectsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='page_size', full_name='monorail.v3.ListProjectsRequest.page_size', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='page_token', full_name='monorail.v3.ListProjectsRequest.page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1018,
serialized_end=1078,
)
_LISTPROJECTSRESPONSE = _descriptor.Descriptor(
name='ListProjectsResponse',
full_name='monorail.v3.ListProjectsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='projects', full_name='monorail.v3.ListProjectsResponse.projects', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='monorail.v3.ListProjectsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1080,
serialized_end=1167,
)
_CREATEFIELDDEFREQUEST.fields_by_name['fielddef'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF
_CREATECOMPONENTDEFREQUEST.fields_by_name['component_def'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF
_LISTISSUETEMPLATESRESPONSE.fields_by_name['templates'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._ISSUETEMPLATE
_LISTCOMPONENTDEFSRESPONSE.fields_by_name['component_defs'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF
_LISTPROJECTSRESPONSE.fields_by_name['projects'].message_type = api_dot_v3_dot_api__proto_dot_project__objects__pb2._PROJECT
DESCRIPTOR.message_types_by_name['CreateFieldDefRequest'] = _CREATEFIELDDEFREQUEST
DESCRIPTOR.message_types_by_name['GetComponentDefRequest'] = _GETCOMPONENTDEFREQUEST
DESCRIPTOR.message_types_by_name['CreateComponentDefRequest'] = _CREATECOMPONENTDEFREQUEST
DESCRIPTOR.message_types_by_name['DeleteComponentDefRequest'] = _DELETECOMPONENTDEFREQUEST
DESCRIPTOR.message_types_by_name['ListIssueTemplatesRequest'] = _LISTISSUETEMPLATESREQUEST
DESCRIPTOR.message_types_by_name['ListIssueTemplatesResponse'] = _LISTISSUETEMPLATESRESPONSE
DESCRIPTOR.message_types_by_name['ListComponentDefsRequest'] = _LISTCOMPONENTDEFSREQUEST
DESCRIPTOR.message_types_by_name['ListComponentDefsResponse'] = _LISTCOMPONENTDEFSRESPONSE
DESCRIPTOR.message_types_by_name['ListProjectsRequest'] = _LISTPROJECTSREQUEST
DESCRIPTOR.message_types_by_name['ListProjectsResponse'] = _LISTPROJECTSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
CreateFieldDefRequest = _reflection.GeneratedProtocolMessageType('CreateFieldDefRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATEFIELDDEFREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.CreateFieldDefRequest)
})
_sym_db.RegisterMessage(CreateFieldDefRequest)
GetComponentDefRequest = _reflection.GeneratedProtocolMessageType('GetComponentDefRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCOMPONENTDEFREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.GetComponentDefRequest)
})
_sym_db.RegisterMessage(GetComponentDefRequest)
CreateComponentDefRequest = _reflection.GeneratedProtocolMessageType('CreateComponentDefRequest', (_message.Message,), {
'DESCRIPTOR' : _CREATECOMPONENTDEFREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.CreateComponentDefRequest)
})
_sym_db.RegisterMessage(CreateComponentDefRequest)
DeleteComponentDefRequest = _reflection.GeneratedProtocolMessageType('DeleteComponentDefRequest', (_message.Message,), {
'DESCRIPTOR' : _DELETECOMPONENTDEFREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.DeleteComponentDefRequest)
})
_sym_db.RegisterMessage(DeleteComponentDefRequest)
ListIssueTemplatesRequest = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTISSUETEMPLATESREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesRequest)
})
_sym_db.RegisterMessage(ListIssueTemplatesRequest)
ListIssueTemplatesResponse = _reflection.GeneratedProtocolMessageType('ListIssueTemplatesResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTISSUETEMPLATESRESPONSE,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListIssueTemplatesResponse)
})
_sym_db.RegisterMessage(ListIssueTemplatesResponse)
ListComponentDefsRequest = _reflection.GeneratedProtocolMessageType('ListComponentDefsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTCOMPONENTDEFSREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsRequest)
})
_sym_db.RegisterMessage(ListComponentDefsRequest)
ListComponentDefsResponse = _reflection.GeneratedProtocolMessageType('ListComponentDefsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTCOMPONENTDEFSRESPONSE,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListComponentDefsResponse)
})
_sym_db.RegisterMessage(ListComponentDefsResponse)
ListProjectsRequest = _reflection.GeneratedProtocolMessageType('ListProjectsRequest', (_message.Message,), {
'DESCRIPTOR' : _LISTPROJECTSREQUEST,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsRequest)
})
_sym_db.RegisterMessage(ListProjectsRequest)
ListProjectsResponse = _reflection.GeneratedProtocolMessageType('ListProjectsResponse', (_message.Message,), {
'DESCRIPTOR' : _LISTPROJECTSRESPONSE,
'__module__' : 'api.v3.api_proto.projects_pb2'
# @@protoc_insertion_point(class_scope:monorail.v3.ListProjectsResponse)
})
_sym_db.RegisterMessage(ListProjectsResponse)
DESCRIPTOR._options = None
_CREATEFIELDDEFREQUEST.fields_by_name['parent']._options = None
_CREATEFIELDDEFREQUEST.fields_by_name['fielddef']._options = None
_GETCOMPONENTDEFREQUEST.fields_by_name['name']._options = None
_CREATECOMPONENTDEFREQUEST.fields_by_name['parent']._options = None
_CREATECOMPONENTDEFREQUEST.fields_by_name['component_def']._options = None
_DELETECOMPONENTDEFREQUEST.fields_by_name['name']._options = None
_LISTISSUETEMPLATESREQUEST.fields_by_name['parent']._options = None
_LISTCOMPONENTDEFSREQUEST.fields_by_name['parent']._options = None
_PROJECTS = _descriptor.ServiceDescriptor(
name='Projects',
full_name='monorail.v3.Projects',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=1170,
serialized_end=1817,
methods=[
_descriptor.MethodDescriptor(
name='CreateFieldDef',
full_name='monorail.v3.Projects.CreateFieldDef',
index=0,
containing_service=None,
input_type=_CREATEFIELDDEFREQUEST,
output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._FIELDDEF,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='GetComponentDef',
full_name='monorail.v3.Projects.GetComponentDef',
index=1,
containing_service=None,
input_type=_GETCOMPONENTDEFREQUEST,
output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='CreateComponentDef',
full_name='monorail.v3.Projects.CreateComponentDef',
index=2,
containing_service=None,
input_type=_CREATECOMPONENTDEFREQUEST,
output_type=api_dot_v3_dot_api__proto_dot_project__objects__pb2._COMPONENTDEF,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='DeleteComponentDef',
full_name='monorail.v3.Projects.DeleteComponentDef',
index=3,
containing_service=None,
input_type=_DELETECOMPONENTDEFREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListIssueTemplates',
full_name='monorail.v3.Projects.ListIssueTemplates',
index=4,
containing_service=None,
input_type=_LISTISSUETEMPLATESREQUEST,
output_type=_LISTISSUETEMPLATESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListComponentDefs',
full_name='monorail.v3.Projects.ListComponentDefs',
index=5,
containing_service=None,
input_type=_LISTCOMPONENTDEFSREQUEST,
output_type=_LISTCOMPONENTDEFSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='ListProjects',
full_name='monorail.v3.Projects.ListProjects',
index=6,
containing_service=None,
input_type=_LISTPROJECTSREQUEST,
output_type=_LISTPROJECTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_PROJECTS)
DESCRIPTOR.services_by_name['Projects'] = _PROJECTS
# @@protoc_insertion_point(module_scope)
| 43.240924 | 2,750 | 0.781942 |
4c03d0743c0121e9d0de50ceaa47b8661683af6f
| 2,207 |
py
|
Python
|
tests/test_device.py
|
michaelwoods/home-assistant-cli
|
340643af943f36283621f39ac39a690b1fccc045
|
[
"Apache-2.0"
] | null | null | null |
tests/test_device.py
|
michaelwoods/home-assistant-cli
|
340643af943f36283621f39ac39a690b1fccc045
|
[
"Apache-2.0"
] | null | null | null |
tests/test_device.py
|
michaelwoods/home-assistant-cli
|
340643af943f36283621f39ac39a690b1fccc045
|
[
"Apache-2.0"
] | null | null | null |
"""Testing Device operations."""
import json
import unittest.mock as mock
from click.testing import CliRunner
import homeassistant_cli.cli as cli
def test_device_list(default_devices) -> None:
    """Listing devices as JSON should return every known device."""
    with mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    ):
        runner = CliRunner()
        outcome = runner.invoke(
            cli.cli,
            ["--output=json", "device", "list"],
            catch_exceptions=False,
        )
        assert outcome.exit_code == 0
        payload = json.loads(outcome.output)
        assert len(payload) == 23
def test_device_list_filter(default_devices) -> None:
    """Listing devices with a name filter should return only matches."""
    with mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    ):
        runner = CliRunner()
        outcome = runner.invoke(
            cli.cli,
            ["--output=json", "device", "list", "table"],
            catch_exceptions=False,
        )
        assert outcome.exit_code == 0
        payload = json.loads(outcome.output)
        assert len(payload) == 2
        names = [entry['name'] for entry in payload]
        assert names == ["Kitchen table left", "Kitchen table right"]
def test_device_assign(default_areas, default_devices) -> None:
    """Assigning an area to a device should report success."""
    # Flatten the original triple-nested context managers into one
    # `with` statement; the patches are identical.
    with mock.patch(
        'homeassistant_cli.remote.get_devices', return_value=default_devices
    ), mock.patch(
        'homeassistant_cli.remote.get_areas', return_value=default_areas
    ), mock.patch(
        'homeassistant_cli.remote.assign_area',
        return_value={'success': True},
    ):
        runner = CliRunner()
        result = runner.invoke(
            cli.cli,
            ["device", "assign", "Kitchen", "Kitchen table left"],
            catch_exceptions=False,
        )
        print(result.output)
        assert result.exit_code == 0
        assert result.output == (
            "Successfully assigned 'Kitchen'"
            " to 'Kitchen table left'\n"
        )
| 29.824324 | 76 | 0.557771 |
4c03f5083b7da646254c6bd784cf88ab749969d1
| 4,350 |
py
|
Python
|
widgets/tree_item.py
|
tarsa129/j3d-animation-editor
|
3f0691bd7dcece6e2055a0b5af0510608f28f2ca
|
[
"MIT"
] | 6 |
2020-08-10T13:09:03.000Z
|
2021-11-20T02:37:46.000Z
|
widgets/tree_item.py
|
tarsa129/j3d-animation-editor
|
3f0691bd7dcece6e2055a0b5af0510608f28f2ca
|
[
"MIT"
] | 3 |
2021-02-16T06:20:23.000Z
|
2022-02-24T21:43:41.000Z
|
widgets/tree_item.py
|
tarsa129/j3d-animation-editor
|
3f0691bd7dcece6e2055a0b5af0510608f28f2ca
|
[
"MIT"
] | 2 |
2021-02-16T05:02:04.000Z
|
2021-12-17T16:11:10.000Z
|
from PyQt5.QtWidgets import QAction, QTreeWidget, QTreeWidgetItem, QFileDialog
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import Qt
import animations.general_animation as j3d
from widgets.yaz0 import compress, compress_slow, compress_fast
from io import BytesIO
| 37.5 | 116 | 0.55931 |
4c040273405e24f9a3249bb42b05984c6988f41a
| 3,445 |
py
|
Python
|
Wheels.py
|
edhosken/WheelsSong
|
cb988c8510a1095eeec3a2399b0fc0ba24bfa648
|
[
"MIT"
] | null | null | null |
Wheels.py
|
edhosken/WheelsSong
|
cb988c8510a1095eeec3a2399b0fc0ba24bfa648
|
[
"MIT"
] | null | null | null |
Wheels.py
|
edhosken/WheelsSong
|
cb988c8510a1095eeec3a2399b0fc0ba24bfa648
|
[
"MIT"
] | null | null | null |
# Pre-defined song variations. The quirky names were chosen so every
# variation starts with a unique letter, enabling one-letter shortcuts
# ('n', 'l', 'r', 'm', 'p', 'g', 'c', 'u') at the prompt.
numbers = (1 ,2 ,3 ,4 ,5 ,6 ,7 ,8 ,9 ,10 ,11 ,12 ,13 ,14 ,15 ,16 ,17 ,18 )
letters = ['a ','b ','c ','d ','e ','f ','g ','h ','i ','j ','k ','l ','m ','n ','o ','p ','q ','r ']
roman = ['I ', 'II ', 'III ', 'IV ', 'V ', 'VI ', 'VII ', 'VIII ', 'IX ', 'X ', 'XI ', 'XII ', 'XIII ', 'XIV ', 'XV ', 'XVI ', 'XVII ', 'XVIII']
military = ['alpha ', 'bravo ', 'charlie ', 'delta ', 'echo ', 'foxtrot ', 'golf ', 'hotel ', 'india ', 'juliet ', 'kilo ', 'lima ', 'mike ', 'november ', 'oscar ', 'papa ', 'quebec ', 'romeo ']
# FIX: restored the umlauts that were lost to an encoding mishap
# ('fnf' -> 'fünf', 'zwlf' -> 'zwölf', 'fnfzehn' -> 'fünfzehn').
german = ['eins', 'zwei', 'drei', 'vier', 'fünf', 'sechs', 'sieben', 'acht', 'neun', 'zehn', 'elf', 'zwölf', 'dreizehn', 'vierzehn', 'fünfzehn', 'sechzehn', 'siebzehn', 'achtzehn']
pi = ['3 ','point ','1 ','4 ','1 ','5 ','9 ','2 ','6 ','5 ','3 ','5 ','8 ','9 ','7 ','9 ','3 ','2 ']

## Build morse code sequences for the numerals 1-18.
t = 'dot'
s = 'dash'
m1 = t, s, s, s, s
m2 = t, t, s, s, s
m3 = t, t, t, s, s
m4 = t, t, t, t, s
m5 = t, t, t, t, t
m6 = s, t, t, t, t
m7 = s, s, t, t, t
m8 = s, s, s, t, t
m9 = s, s, s, s, t
m0 = s, s, s, s, s
# Two-digit numerals concatenate the sequences of their digits.
code = [m1, m2, m3, m4, m5, m6, m7, m8, m9, m1 + m0, m1 + m1, m1 + m2, m1 + m3, m1 + m4, m1 + m5, m1 + m6, m1 + m7, m1 + m8]

## Other ideas: piglatin, japanese, spanish, prime, tau, e, ...

print("Hello, let's sing a song that everybody loves!\n")

sing = 'y'
while sing == 'y':
    # Keep prompting until the reply names a known variation (input
    # validation was a declared TODO in the original script).
    variation = None
    while variation is None:
        choice = input ("Please input what variation you wish to perform be entering 'numbers', 'letters', 'roman', 'military', 'pi', 'german', 'code', or 'user' to make your own song: \n").lower().strip()
        if choice in ('numbers', 'n'):
            variation = numbers
        elif choice in ('letters', 'l'):
            variation = letters
        elif choice in ('roman', 'r'):
            variation = roman
        elif choice in ('military', 'm'):
            variation = military
        elif choice in ('pi', 'p'):
            variation = pi
        elif choice in ('german', 'g'):
            variation = german
        elif choice in ('code', 'c'):
            variation = code
        elif choice in ('user', 'u'):
            user = []
            while len(user) < 18:
                user.append(input ("Enter a word: "))
            # FIX: the original collected the user's words but never
            # assigned them, so the song sliced the string "user".
            variation = user
        else:
            print("Sorry, I don't know that variation. Please try again.")

    # Validate the pattern the same way before singing.
    pattern = ''
    while pattern not in ('forward', 'f', 'backward', 'b', 'even', 'e', 'odd', 'o'):
        pattern = input ("\nNow please tell me what pattern to use by entering 'forward', 'backward', 'even', or 'odd':\n")

    print ("\nHere we go: \n\n")

    # Assemble the song. Each candidate is a tuple so the chosen
    # variation prints with its original formatting.
    song1 = "Oh, there are "
    song2 = " wheels on a big rig truck!"
    a = song1, variation[::], song2
    b = song1, variation[::-1], song2
    c = song1, variation[::2], song2      # even indices -> odd-numbered wheels
    d = song1, variation[1::2], song2     # odd indices -> even-numbered wheels

    if pattern == 'forward' or pattern == 'f':
        print (a)
    elif pattern == 'backward' or pattern == 'b':
        print (b)
    elif pattern == 'odd' or pattern == 'o':
        print (c)
    elif pattern == 'even' or pattern == 'e':
        print (d)

    sing = input('\n\nWould you like to sing it again? (y/n) ').lower()
## This is the end of the while loop
else:
    print ("\nOK, Goodbye!")
4c0417fc2a324560f940489498afd9c4d64ac7c7
| 15,792 |
py
|
Python
|
tests/test_config.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_config.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_config.py
|
dfroger/conda
|
c0f99ff46b217d081501e66f4dcd7bcdb5d9c6aa
|
[
"BSD-3-Clause"
] | null | null | null |
# (c) 2012-2014 Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.

# Module-level setup for the conda config tests: point conda at a condarc
# from the source tree and scrub environment state that would make the
# tests hit Continuum-internal servers.
import os
from os.path import dirname, join, exists
import unittest
import pytest

import conda.config as config
from conda.utils import get_yaml
from conda.compat import iterkeys

from tests.helpers import run_conda_command

yaml = get_yaml()

# use condarc from source tree to run these tests against
config.rc_path = join(dirname(__file__), 'condarc')
# NOTE(review): _get_default_urls is not defined anywhere in this chunk —
# presumably declared earlier in the full file (this excerpt is visibly
# truncated, cf. the FIXME markers below). Confirm before relying on it.
config.get_default_urls = _get_default_urls

# unset CIO_TEST. This is a Continuum-internal variable that draws packages from an internal server instead of
# repo.continuum.io
try:
    del os.environ['CIO_TEST']
except KeyError:
    pass

# Scratch condarc path used (and cleaned up) by the tests below.
test_condarc = os.path.join(os.path.dirname(__file__), 'test_condarc')

# Tests for the conda config command
# FIXME This shoiuld be multiple individual tests
# FIXME Break into multiple tests
# FIXME Break into multiple tests
# FIXME Break into multiple tests
# FIXME Break into multiple tests
def test_invalid_rc():
    """Reject a condarc whose ``channels`` key has the wrong type.

    ``channels`` must be a list; leaving it empty parses as ``None``, so
    ``conda config --add`` must fail with a parse error and leave the file
    unmodified.
    """
    try:
        condarc = """\
channels:
"""
        with open(test_condarc, 'w') as f:
            f.write(condarc)
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--add', 'channels', 'test')
        assert stdout == ''
        assert stderr == """\
Error: Could not parse the yaml file. Use -f to use the
yaml parser (this will remove any structure or comments from the existing
.condarc file). Reason: key 'channels' should be a list, not NoneType."""
        assert _read_test_condarc() == condarc
        os.unlink(test_condarc)
    finally:
        # Best-effort cleanup: the happy path already removed the file, so
        # a missing file is not an error.  (A stray dead `pass` statement
        # that preceded the unlink has been removed.)
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
def test_config_set():
    """`conda config --set` must reject a non-boolean value for a boolean key."""
    try:
        out, err = run_conda_command(
            'config', '--file', test_condarc,
            '--set', 'always_yes', 'yep')
        assert out == ''
        assert err == 'Error: Key: always_yes; yep is not a YAML boolean.'
    finally:
        # Remove the scratch condarc if the command happened to create it.
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
def test_set_rc_string():
    """Set string keys in .condarc via ``conda config --set``.

    ``ssl_verify`` is used because it accepts either a boolean or an
    arbitrary string (e.g. a path to a CA bundle).
    """
    try:
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'yes')
        assert stdout == ''
        assert stderr == ''
        # Read the file back with the round-trip loader to inspect the raw
        # stored value.  Use `with` so the handle is closed (the original
        # leaked two open file objects).
        with open(test_condarc, 'r') as f:
            verify = yaml.load(f, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'yes'
        stdout, stderr = run_conda_command('config', '--file', test_condarc,
                                           '--set', 'ssl_verify', 'test_string.crt')
        assert stdout == ''
        assert stderr == ''
        with open(test_condarc, 'r') as f:
            verify = yaml.load(f, Loader=yaml.RoundTripLoader)['ssl_verify']
        assert verify == 'test_string.crt'
        os.unlink(test_condarc)
    finally:
        # Best-effort cleanup; the happy path already removed the file.
        try:
            os.unlink(test_condarc)
        except OSError:
            pass
| 30.31094 | 111 | 0.597138 |
4c045d0953c279b203d260f5d6f3f9a0b7bdf019
| 3,579 |
py
|
Python
|
malaya/transformers/babble.py
|
ahmed3991/malaya
|
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
|
[
"MIT"
] | 1 |
2021-03-19T22:42:34.000Z
|
2021-03-19T22:42:34.000Z
|
malaya/transformers/babble.py
|
ahmed3991/malaya
|
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
|
[
"MIT"
] | null | null | null |
malaya/transformers/babble.py
|
ahmed3991/malaya
|
d90be6d5b2a1393a3f8b8b1ffa8ae676cdaa083c
|
[
"MIT"
] | null | null | null |
# Bert has a Mouth, and It Must Speak: BERT as a Markov Random Field Language Model,
# by Alex Wang, Kyunghyun Cho, NeuralGen 2019
# https://colab.research.google.com/drive/1MxKZGtQ9SSBjTK5ArsZ5LKhkztzg52RV
# https://arxiv.org/abs/1902.04094
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
import math
from malaya.text.bpe import merge_sentencepiece_tokens, merge_wordpiece_tokens
CLS = '[CLS]'
SEP = '[SEP]'
MASK = '[MASK]'
| 28.632 | 84 | 0.626991 |
4c045e92df54148ce6ef4110afe95ac625400e40
| 652 |
py
|
Python
|
coding patterns/two pointers/sortedarr_square.py
|
mkoryor/Python
|
837ec4c03130dc4cb919fb5f1eeb4d31206790e4
|
[
"Unlicense"
] | null | null | null |
coding patterns/two pointers/sortedarr_square.py
|
mkoryor/Python
|
837ec4c03130dc4cb919fb5f1eeb4d31206790e4
|
[
"Unlicense"
] | null | null | null |
coding patterns/two pointers/sortedarr_square.py
|
mkoryor/Python
|
837ec4c03130dc4cb919fb5f1eeb4d31206790e4
|
[
"Unlicense"
] | null | null | null |
"""
[E] Given a sorted array, create a new array containing squares of all the
number of the input array in the sorted order.
Input: [-2, -1, 0, 2, 3]
Output: [0, 1, 4, 4, 9]
"""
# Time: O(N) Space: O(n)
| 19.757576 | 75 | 0.619632 |
4c060ddce60243c22acf2298bfd181a17b757f40
| 1,917 |
py
|
Python
|
modules/evaluate/evaluate_step.py
|
Azure/aml-object-classification-pipeline
|
f94e4327ebfb5534b52c5c70e82832a86c64a2d1
|
[
"MIT"
] | 5 |
2020-05-20T12:41:31.000Z
|
2022-03-18T17:35:26.000Z
|
modules/evaluate/evaluate_step.py
|
Azure/aml-object-classification-pipeline
|
f94e4327ebfb5534b52c5c70e82832a86c64a2d1
|
[
"MIT"
] | null | null | null |
modules/evaluate/evaluate_step.py
|
Azure/aml-object-classification-pipeline
|
f94e4327ebfb5534b52c5c70e82832a86c64a2d1
|
[
"MIT"
] | 5 |
2020-06-03T12:19:20.000Z
|
2021-12-30T02:58:06.000Z
|
import os
from azureml.pipeline.steps import PythonScriptStep
from azureml.core.runconfig import RunConfiguration
from azureml.core.conda_dependencies import CondaDependencies
from azureml.pipeline.core import PipelineData
from azureml.pipeline.core import PipelineParameter
from azureml.pipeline.steps import EstimatorStep
from azureml.train.dnn import PyTorch
def evaluate_step(model_dir, test_dir, compute_target):
    '''
    Build the pipeline step that scores the trained model on the test set.

    :param model_dir: The reference to the directory containing the trained model
    :type model_dir: DataReference
    :param test_dir: The reference to the directory containing the testing data
    :type test_dir: DataReference
    :param compute_target: The compute target to run the step on
    :type compute_target: ComputeTarget
    :return: The evaluate step, step outputs dictionary (keys: accuracy_file)
    :rtype: EstimatorStep, dict
    '''
    # Pipeline-level output file that will receive the computed accuracy.
    accuracy_file = PipelineData(
        name='accuracy_file',
        pipeline_output_name='accuracy_file',
        datastore=test_dir.datastore,
        output_mode='mount',
        is_directory=False)

    # GPU-backed PyTorch estimator wrapping this module's evaluate.py.
    estimator = PyTorch(
        source_directory=os.path.dirname(os.path.abspath(__file__)),
        entry_script='evaluate.py',
        framework_version='1.3',
        compute_target=compute_target,
        use_gpu=True)

    step = EstimatorStep(
        name="Evaluate Model",
        estimator=estimator,
        estimator_entry_script_arguments=[
            '--test_dir', test_dir,
            '--model_dir', model_dir,
            '--accuracy_file', accuracy_file
        ],
        inputs=[model_dir, test_dir],
        outputs=[accuracy_file],
        compute_target=compute_target,
        allow_reuse=True)

    return step, {'accuracy_file': accuracy_file}
| 34.232143 | 87 | 0.708399 |
4c0955be4956893e543176a5a678b0a7caa5514d
| 4,806 |
py
|
Python
|
configs/mobilenet_cfbi.py
|
yoxu515/CFBI
|
0bab1e3c9fc3e3ba0629f716d60221e8f8d9d586
|
[
"BSD-3-Clause"
] | 312 |
2020-03-15T03:51:52.000Z
|
2022-03-23T07:33:39.000Z
|
configs/mobilenet_cfbi.py
|
geekJZY/CFBI
|
90a0cd6a3e7961f47f266c7620e8dc281dc43ac8
|
[
"BSD-3-Clause"
] | 55 |
2020-06-27T06:39:27.000Z
|
2022-03-24T19:02:15.000Z
|
configs/mobilenet_cfbi.py
|
geekJZY/CFBI
|
90a0cd6a3e7961f47f266c7620e8dc281dc43ac8
|
[
"BSD-3-Clause"
] | 41 |
2020-07-28T00:52:04.000Z
|
2022-03-25T08:49:47.000Z
|
import torch
import argparse
import os
import sys
import cv2
import time
cfg = Configuration()
| 36.687023 | 123 | 0.645651 |
4c09b0745eee677f40540659a3c584b6e7535d0a
| 968 |
py
|
Python
|
js/matrixjs/matrix_compile.py
|
kennytilton/ConnectJS
|
a16121052839b6f447718dccb008761d92094885
|
[
"MIT"
] | 7 |
2017-07-31T20:28:33.000Z
|
2020-11-23T13:18:20.000Z
|
js/matrixjs/matrix_compile.py
|
kennytilton/ConnectJS
|
a16121052839b6f447718dccb008761d92094885
|
[
"MIT"
] | null | null | null |
js/matrixjs/matrix_compile.py
|
kennytilton/ConnectJS
|
a16121052839b6f447718dccb008761d92094885
|
[
"MIT"
] | 1 |
2020-02-26T06:09:33.000Z
|
2020-02-26T06:09:33.000Z
|
#!/usr/bin/env python3
"""Submit the MatrixJS sources to the Google Closure Compiler web service.

POSTs the source URLs to closure-compiler.appspot.com and prints the
service's response (compiler warnings, per ``output_info``).

Ported from Python 2 (httplib/urllib) to the Python 3 standard library.
"""
import http.client
import urllib.parse

# Define the parameters for the POST request and encode them in
# a URL-safe format.  Repeated 'code_url' keys are intentional: the
# service accepts one per source file.
params = urllib.parse.urlencode([
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Cells.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Matrix/Model.js'),
    ('code_url', 'https://raw.githubusercontent.com/kennytilton/MatrixJS/master/js/matrixjs/js/Tag.js'),
    ('compilation_level', 'ADVANCED_OPTIMIZATIONS'),
    ('output_format', 'text'),
    ('output_info', 'warnings'),
])

# Always use the following value for the Content-type header.
headers = {"Content-type": "application/x-www-form-urlencoded"}

conn = http.client.HTTPConnection('closure-compiler.appspot.com')
try:
    conn.request('POST', '/compile', params, headers)
    response = conn.getresponse()
    data = response.read().decode('utf-8')
    print(data)
finally:
    # Always release the connection, even if the request fails.
    conn.close()
| 38.72 | 113 | 0.72314 |
4c09c2107e354abe29a0559333bd163e132e44d0
| 4,551 |
py
|
Python
|
tensorflow/python/util/tf_should_use_test.py
|
npow/tensorflow
|
99ae68bba52bb6338af06f37bb104128d7af6fb4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/util/tf_should_use_test.py
|
npow/tensorflow
|
99ae68bba52bb6338af06f37bb104128d7af6fb4
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/python/util/tf_should_use_test.py
|
npow/tensorflow
|
99ae68bba52bb6338af06f37bb104128d7af6fb4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for tf_should_use."""
# pylint: disable=unused-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import sys
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.util import tf_should_use
if __name__ == '__main__':
test.main()
| 33.463235 | 80 | 0.702703 |
4c0a433f8f2a1c5fe05d98092959a53a97b1beea
| 8,767 |
bzl
|
Python
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 16,989 |
2015-09-01T19:57:15.000Z
|
2022-03-31T23:54:00.000Z
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 12,562 |
2015-09-01T09:06:01.000Z
|
2022-03-31T22:26:20.000Z
|
tools/jdk/local_java_repository.bzl
|
loongarch64/bazel
|
44c30aceec076a0c25f506508704df0b9aeb6578
|
[
"Apache-2.0"
] | 3,707 |
2015-09-02T19:20:01.000Z
|
2022-03-31T17:06:14.000Z
|
# Copyright 2020 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rules for importing and registering a local JDK."""
load(":default_java_toolchain.bzl", "JVM8_TOOLCHAIN_CONFIGURATION", "default_java_toolchain")
def local_java_runtime(name, java_home, version, runtime_name = None, visibility = ["//visibility:public"]):
    """Defines a java_runtime target together with Java runtime and compile toolchain definitions.

    Java runtime toolchain is constrained by flag --java_runtime_version having
    value set to either name or version argument.

    Java compile toolchains are created for --java_language_version flags values
    between 8 and version (inclusive). Java compile toolchains use the same
    (local) JDK for compilation. This requires a different configuration for JDK8
    than the newer versions.

    Args:
      name: name of the target.
      java_home: Path to the JDK.
      version: Version of the JDK.
      runtime_name: name of java_runtime target if it already exists.
      visibility: Visibility that will be applied to the java runtime target
    """
    if runtime_name == None:
        runtime_name = name
    native.java_runtime(
        name = runtime_name,
        java_home = java_home,
        visibility = visibility,
    )

    # Three config_settings so toolchain resolution matches when
    # --java_runtime_version is the name, the version, or "name_version".
    native.config_setting(
        name = name + "_name_setting",
        values = {"java_runtime_version": name},
        visibility = ["//visibility:private"],
    )
    native.config_setting(
        name = name + "_version_setting",
        values = {"java_runtime_version": version},
        visibility = ["//visibility:private"],
    )
    native.config_setting(
        name = name + "_name_version_setting",
        values = {"java_runtime_version": name + "_" + version},
        visibility = ["//visibility:private"],
    )
    native.alias(
        name = name + "_settings_alias",
        actual = select({
            name + "_name_setting": name + "_name_setting",
            name + "_version_setting": name + "_version_setting",
            "//conditions:default": name + "_name_version_setting",
        }),
        visibility = ["//visibility:private"],
    )
    native.toolchain(
        name = "runtime_toolchain_definition",
        target_settings = [":%s_settings_alias" % name],
        toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
        toolchain = runtime_name,
    )

    if version == "8":
        # JDK 8 needs its own bootclasspath-based configuration.
        default_java_toolchain(
            name = name + "_toolchain_java8",
            configuration = JVM8_TOOLCHAIN_CONFIGURATION,
            source_version = version,
            target_version = version,
            java_runtime = runtime_name,
        )
    elif type(version) == type("") and version.isdigit() and int(version) > 8:
        # Loop variable renamed from `version` to `v`: the original shadowed
        # the `version` parameter, which is error-prone if code is ever added
        # after the loop.
        for v in range(8, int(version) + 1):
            default_java_toolchain(
                name = name + "_toolchain_java" + str(v),
                source_version = str(v),
                target_version = str(v),
                java_runtime = runtime_name,
            )

    # else version is not recognized and no compilation toolchains are predefined
def _local_java_repository_impl(repository_ctx):
    """Repository rule local_java_repository implementation.

    Args:
      repository_ctx: repository context
    """
    java_home = repository_ctx.attr.java_home
    java_home_path = repository_ctx.path(java_home)
    if not java_home_path.exists:
        # BUGFIX: the original applied `%` only to the second string literal
        # ("does not exist.", which has no format specifiers), so the error
        # path itself failed with a formatting error.  Parenthesize the
        # concatenation so both placeholders receive their arguments.
        fail(('The path indicated by the "java_home" attribute "%s" (absolute: "%s") ' +
              "does not exist.") % (java_home, str(java_home_path)))

    repository_ctx.file(
        "WORKSPACE",
        "# DO NOT EDIT: automatically generated WORKSPACE file for local_java_repository\n" +
        "workspace(name = \"{name}\")\n".format(name = repository_ctx.name),
    )

    extension = ".exe" if repository_ctx.os.name.lower().find("windows") != -1 else ""
    java_bin = java_home_path.get_child("bin").get_child("java" + extension)

    if not java_bin.exists:
        # Java binary does not exist: emit "virtual" targets that only fail
        # when actually built (see _NOJDK_BUILD_TPL).
        repository_ctx.file(
            "BUILD.bazel",
            _NOJDK_BUILD_TPL.format(
                local_jdk = repository_ctx.name,
                java_binary = "bin/java" + extension,
                java_home = java_home,
            ),
            False,
        )
        return

    # Detect version if not given explicitly.
    version = repository_ctx.attr.version if repository_ctx.attr.version != "" else _detect_java_version(repository_ctx, java_bin)

    # Prepare BUILD file using "local_java_runtime" macro.
    build_file = ""
    if repository_ctx.attr.build_file != None:
        build_file = repository_ctx.read(repository_ctx.path(repository_ctx.attr.build_file))

    # When a custom build_file is supplied it is expected to define "jdk".
    runtime_name = '"jdk"' if repository_ctx.attr.build_file else None
    local_java_runtime_macro = """
local_java_runtime(
    name = "%s",
    runtime_name = %s,
    java_home = "%s",
    version = "%s",
)
""" % (repository_ctx.name, runtime_name, java_home, version)

    repository_ctx.file(
        "BUILD.bazel",
        'load("@bazel_tools//tools/jdk:local_java_repository.bzl", "local_java_runtime")\n' +
        build_file +
        local_java_runtime_macro,
    )

    # Symlink all files of the local JDK into the repository.
    for file in repository_ctx.path(java_home).readdir():
        repository_ctx.symlink(file, file.basename)
# Build file template, when JDK does not exist
_NOJDK_BUILD_TPL = '''load("@bazel_tools//tools/jdk:fail_rule.bzl", "fail_rule")
fail_rule(
name = "jdk",
header = "Auto-Configuration Error:",
message = ("Cannot find Java binary {java_binary} in {java_home}; either correct your JAVA_HOME, " +
"PATH or specify Java from remote repository (e.g. " +
"--java_runtime_version=remotejdk_11")
)
config_setting(
name = "localjdk_setting",
values = {{"java_runtime_version": "{local_jdk}"}},
visibility = ["//visibility:private"],
)
toolchain(
name = "runtime_toolchain_definition",
target_settings = [":localjdk_setting"],
toolchain_type = "@bazel_tools//tools/jdk:runtime_toolchain_type",
toolchain = ":jdk",
)
'''
_local_java_repository_rule = repository_rule(
implementation = _local_java_repository_impl,
local = True,
configure = True,
attrs = {
"java_home": attr.string(),
"version": attr.string(),
"build_file": attr.label(),
},
)
def local_java_repository(name, java_home, version = "", build_file = None):
    """Registers a runtime toolchain for local JDK and creates an unregistered compile toolchain.

    Toolchain resolution is constrained with --java_runtime_version flag
    having value of the "name" or "version" parameter.

    Java compile toolchains are created for --java_language_version flags values
    between 8 and version (inclusive). Java compile toolchains use the same
    (local) JDK for compilation.

    If there is no JDK "virtual" targets are created, which fail only when actually needed.

    Args:
      name: A unique name for this rule.
      java_home: Location of the JDK imported.
      version: Optionally, the JDK version; when empty, it is detected from
        the JDK binary by the repository rule.
      build_file: Optionally, a BUILD file template for the repository.
    """
    _local_java_repository_rule(name = name, java_home = java_home, version = version, build_file = build_file)
    native.register_toolchains("@" + name + "//:runtime_toolchain_definition")
| 37.626609 | 130 | 0.666705 |
4c0ab106ed9ecd5a4593bfc5cb160cb433ae9bfc
| 2,563 |
py
|
Python
|
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | 1 |
2015-02-10T23:26:39.000Z
|
2015-02-10T23:26:39.000Z
|
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/fixtures/resources/v0_1.py
|
SEL-Columbia/commcare-hq
|
992ee34a679c37f063f86200e6df5a197d5e3ff6
|
[
"BSD-3-Clause"
] | null | null | null |
from couchdbkit import ResourceNotFound
from tastypie import fields as tp_f
from corehq.apps.api.resources import JsonResource
from corehq.apps.api.resources.v0_1 import (
CustomResourceMeta,
RequirePermissionAuthentication,
)
from corehq.apps.api.util import get_object_or_not_exist
from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.users.models import Permissions
| 39.430769 | 79 | 0.673039 |
4c0ab34e213a6cac6f714e8bdb911bff09620f44
| 24,467 |
py
|
Python
|
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_domain.py
|
broadinstitute/cert_manager_api
|
3a9c3445ff32ecd29ab47e7a049c47155b72614a
|
[
"BSD-3-Clause"
] | 1 |
2022-03-17T16:33:46.000Z
|
2022-03-17T16:33:46.000Z
|
# -*- coding: utf-8 -*-
"""Define the cert_manager.domain.Domain unit tests."""
# Don't warn about things that happen as that is part of unit testing
# pylint: disable=protected-access
# pylint: disable=no-member
import json
from requests.exceptions import HTTPError
from testtools import TestCase
import responses
from cert_manager.domain import Domain, DomainCreationResponseError
from .lib.testbase import ClientFixture
| 32.666222 | 107 | 0.647484 |
4c0acd1ef9075a9d08118479182c8461e04d6e01
| 3,844 |
py
|
Python
|
texts.py
|
ProtKsen/pgame
|
c4455c6c07eaf275f9fcfa661cd6933ee7b1ff92
|
[
"MIT"
] | 2 |
2021-04-14T09:49:27.000Z
|
2022-03-08T17:26:49.000Z
|
texts.py
|
ProtKsen/pgame
|
c4455c6c07eaf275f9fcfa661cd6933ee7b1ff92
|
[
"MIT"
] | null | null | null |
texts.py
|
ProtKsen/pgame
|
c4455c6c07eaf275f9fcfa661cd6933ee7b1ff92
|
[
"MIT"
] | 2 |
2021-01-11T12:09:26.000Z
|
2021-04-14T09:49:45.000Z
|
"""Text parts."""
SEPARATOR = '----------------------------------'
CONT_GAME = 'enter '
GREETING = ' '' ''!\n' \
' , ' \
' !'
NAME_QUESTION = ' ?'
CHOOSE_LEVEL = ' , ' \
' . \n' \
'1 - \n' \
'2 - \n' \
'3 - '
INTRODUCTION = ' , \n' \
' , \n' \
' 10 . \n' \
' . , , \n' \
' . \n' \
' , \n' \
' . \n\n' \
' - . \n\n' \
' , \n' \
' \n' \
', . \n\n' \
'!!! ,\n' \
' . !!!'
ORACLE_QUESTION = ' . \n' \
' \n' \
' . ?\n' \
'----------------------------------\n'\
'1 - , \n' \
'2 - , '
ORACLE_QUESTION_1 = ' ? \n' \
'----------------------------------\n'\
'1 - , ! \n'\
'2 - ? (1 ) \n'\
'3 - ? (1 ) \n'\
'4 - ? (1 ) \n'\
'5 - (3 )'
ORACLE_QUESTION_2 = ' ? \n' \
'----------------------------------\n'\
'1 - , ! \n'\
'2 - ? (1 ) \n'\
'3 - ? (1 ) \n'\
'4 - ? (1 )'
GO_TAVERN_TEXT = '! \n' \
', .'
EXIT_QUESTION = ' ?\n' \
'----------------------------------\n'\
'1 - \n' \
'2 - '
SUCCESS_STEP = '! ! \n' \
' .'
FAILURE_STEP = ' , . \n' \
' , \n' \
' . !'
WINNING = '! \n' \
', ) \n' \
' .'
LOSING = ', . \n' \
' . ! \n' \
' .'
NAMES = ['', '', '', '', ' ', '',
'', '', '', '', '', '', '',
'', '', '', '', '']
| 48.05 | 90 | 0.533299 |
4c0b4ff66b7c9992658d93e432fdd2bd5452694f
| 3,587 |
py
|
Python
|
api/migrations/0001_initial.py
|
alerin345/Instagram-React
|
25dfbcbff2a2d050e4f2804a74cd7c901cd2cb66
|
[
"MIT"
] | null | null | null |
api/migrations/0001_initial.py
|
alerin345/Instagram-React
|
25dfbcbff2a2d050e4f2804a74cd7c901cd2cb66
|
[
"MIT"
] | null | null | null |
api/migrations/0001_initial.py
|
alerin345/Instagram-React
|
25dfbcbff2a2d050e4f2804a74cd7c901cd2cb66
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.1.3 on 2021-01-07 00:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
| 48.472973 | 170 | 0.611096 |
4c0c9d4712283b7b6b90ddca4309f49cea6694d9
| 737 |
py
|
Python
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 21 |
2021-03-30T19:39:46.000Z
|
2022-03-30T22:27:39.000Z
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 12 |
2021-03-30T20:52:15.000Z
|
2022-02-23T09:20:42.000Z
|
fastapi_router_controller/lib/controller_loader.py
|
KiraPC/fastapi-router-controller
|
e105701ebce2e03a0e00ac182c10941daf1b7e22
|
[
"MIT"
] | 6 |
2021-04-03T19:17:55.000Z
|
2021-12-20T10:20:57.000Z
|
import os
import importlib
| 29.48 | 79 | 0.561737 |
4c0d5b44bfd54d1398052b98c3fc9dbc04be5c4f
| 601 |
py
|
Python
|
app/mod_ecomm/controllers.py
|
VikrantReddy/Instagram2Shop
|
8d9c3f39d277fafb56d10a87a1b62a6df8a74237
|
[
"MIT"
] | null | null | null |
app/mod_ecomm/controllers.py
|
VikrantReddy/Instagram2Shop
|
8d9c3f39d277fafb56d10a87a1b62a6df8a74237
|
[
"MIT"
] | null | null | null |
app/mod_ecomm/controllers.py
|
VikrantReddy/Instagram2Shop
|
8d9c3f39d277fafb56d10a87a1b62a6df8a74237
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, Flask, send_from_directory
from werkzeug.security import check_password_hash, generate_password_hash
from app import db
from app.mod_auth.forms import LoginForm
from app.mod_auth.models import User
mod_ecomm = Blueprint('products', __name__, url_prefix='/products',
static_folder='../../frontend/build')
| 33.388889 | 74 | 0.71381 |
4c0dc6446f67b743dcdbd74576706d0d6a1843b4
| 118 |
py
|
Python
|
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | null | null | null |
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | 1 |
2021-12-14T06:05:26.000Z
|
2021-12-14T06:05:26.000Z
|
dagr_selenium/crawl_watchlist.py
|
phillmac/dagr_selenium
|
b7417a878fe4c171625a40e746113ae2c0222335
|
[
"MIT"
] | null | null | null |
# Entry point: run the watchlist monitor once inside a managed dagr session.
from .functions import monitor_watchlist_action, manager

# `get_dagr()` is a context manager supplied by the shared manager; the
# monitor runs while the dagr session it yields is held open.
with manager.get_dagr():
    monitor_watchlist_action()
| 23.6 | 57 | 0.788136 |
4c0e902c9bd14492f727e042bd245ed10c04c202
| 2,739 |
py
|
Python
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 2 |
2020-12-30T07:46:12.000Z
|
2022-02-01T16:37:34.000Z
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 7 |
2021-04-14T16:17:29.000Z
|
2022-01-25T11:48:18.000Z
|
zenslackchat/eventsview.py
|
uktrade/zenslackchat
|
8071757e1ea20a433783c6a7c47f25b046692682
|
[
"MIT"
] | 1 |
2021-06-06T09:46:47.000Z
|
2021-06-06T09:46:47.000Z
|
import pprint
import logging
from django.conf import settings
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.response import Response
from zenslackchat.message import handler
from zenslackchat.models import SlackApp
from zenslackchat.models import ZendeskApp
| 38.041667 | 79 | 0.649507 |
4c0ea6f1c1da094761872bcebae0cfc6089b3d54
| 16,882 |
py
|
Python
|
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2 |
2021-09-16T06:31:45.000Z
|
2022-03-09T19:59:55.000Z
|
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | null | null | null |
sdv/docker/sdvstate/internal/validator/airship/compute_check.py
|
opnfv/cirv-sdv
|
31fb310d3fd1c9c1f12cfe0c654870e24f5efab6
|
[
"Apache-2.0"
] | 2 |
2021-05-11T14:41:01.000Z
|
2021-05-14T05:59:38.000Z
|
# Copyright 2020 University Of Delhi.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Compute Related Checks
"""
import configparser
import json
import re
import logging
from tools.kube_utils import kube_exec, get_pod_with_labels
from tools.conf import settings
from internal import store_result
###########
# Checks
###########
def isolated_cores_check():
    """Check that the traced isolated CPU cores match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_isolated_cores()
    required = required_isolated_cores()
    result = {
        'category': 'compute',
        'case_name': 'isolated_cores_check',
        'details': {'traced_cores': traced, 'required_cores': required},
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def reserved_vnf_cores_check():
    """Check that the traced VNF-reserved cores match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_reserved_vnf_cores()
    required = required_reserved_vnf_cores()
    result = {
        'category': 'compute',
        'case_name': 'reserved_vnf_cores_check',
        'details': {'traced_cores': traced, 'required_cores': required},
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def vswitch_pmd_cores_check():
    """Check that the traced vswitch PMD cores match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_vswitch_pmd_cores()
    required = required_vswitch_pmd_cores()
    result = {
        'category': 'compute',
        'case_name': 'vswitch_pmd_cores_check',
        'details': {'traced_cores': traced, 'required_cores': required},
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def vswitch_dpdk_lcores_check():
    """Check that the traced vswitch DPDK lcores match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_vswitch_dpdk_lcores()
    required = required_vswitch_dpdk_lcores()
    result = {
        'category': 'compute',
        'case_name': 'vswitch_dpdk_lcores_check',
        'details': {'traced_cores': traced, 'required_cores': required},
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def os_reserved_cores_check():
    """Check that the traced OS-reserved cores match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_os_reserved_cores()
    required = required_os_reserved_cores()
    result = {
        'category': 'compute',
        'case_name': 'os_reserved_cores_check',
        'details': {'traced_cores': traced, 'required_cores': required},
        'criteria': 'pass' if is_ranges_equals(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def nova_scheduler_filters_check():
    """Check that the traced nova scheduler filters match the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_nova_scheduler_filters()
    required = required_nova_scheduler_filters()
    result = {
        'category': 'compute',
        'case_name': 'nova_scheduler_filters_check',
        'details': {'traced_filters': traced, 'required_filters': required},
        'criteria': 'pass' if are_lists_equal(traced, required) else 'fail',
    }
    store_result(logger, result)
    return result
def cpu_allocation_ratio_check():
    """Check that the traced CPU allocation ratio matches the PDF requirement."""
    logger = logging.getLogger(__name__)
    traced = trace_cpu_allocation_ratio()
    required = required_cpu_allocation_ratio()
    result = {
        'category': 'compute',
        'case_name': 'cpu_allocation_ratio_check',
        'details': {'traced_ratio': traced, 'required_ratio': required},
        'criteria': 'pass' if traced == required else 'fail',
    }
    store_result(logger, result)
    return result
###############
# helper functions
###############
def trace_isolated_cores():
    """
    Trace isolated_cores from Airship deployment

    :return: value traced from the `isolcpus` key in `/proc/cmdline`,
        or '' when the kernel command line has no such option
    """
    pod = get_pod_with_labels('application=nova,component=compute')
    cmd = ['cat', '/proc/cmdline']
    proc_cmd = kube_exec(pod, cmd)
    # Default to '' so a cmdline without `isolcpus` does not raise
    # NameError (the original left the variable unbound in that case).
    isolcpus_value = ''
    for option in proc_cmd.split():
        if 'isolcpus' in option:
            _, isolcpus_value = split_key_value(option)
            break
    return isolcpus_value
def required_isolated_cores():
    """Return the `isolated_cpus` value of the worker role's platform profile.

    :return: isolated_cores value expected by the PDF
    """
    role = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role)['isolated_cpus']
def trace_reserved_vnf_cores():
    """Trace vnf_reserved_cores from the deployed nova.conf.

    :return: value of the `vcpu_pin_set` key in nova.conf, or '' when the
        option (or its section header) is missing
    """
    try:
        pin_set = get_nova_conf().get('DEFAULT', 'vcpu_pin_set')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        pin_set = ''
    return pin_set
def required_reserved_vnf_cores():
    """Return the `vnf_cores` value of the worker role's platform profile.

    :return: vnf_reserved_cores value expected by the PDF
    """
    role = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role)['vnf_cores']
def trace_vswitch_pmd_cores():
    """
    Trace vswitch_pmd_cores from Airship deployment

    :return: value traced from `other_config:pmd-cpu-mask` in
        openvswitchdb using ovs-vsctl, converted to a comma-separated
        core list; '' when the key is absent
    """
    ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
    cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
    response = kube_exec(ovs_pod, cmd)
    # convert config str to json str
    # ovs-vsctl prints `{key=value, ...}`; the two replace passes wrap keys
    # and bare values in double quotes so json.loads can parse the result.
    # NOTE(review): str.replace rewrites every occurrence of a token, so a
    # value repeating another key's text could be quoted twice -- confirm
    # against real `ovs-vsctl get` output.
    match = re.findall("[a-zA-Z0-9-]+=", response)
    for key in match:
        response = response.replace(key, '"' + key[:-1] + '":')
    match = re.findall(":[a-zA-Z0-9-]+", response)
    for key in match:
        response = response.replace(key[1:], '"' + key[1:] + '"')
    config = json.loads(response)
    # pmd-cpu-mask is a hex CPU mask; hex_to_comma_list expands it to a
    # comma-separated list of core numbers.
    if 'pmd-cpu-mask' in config:
        pmd_cores = hex_to_comma_list(config['pmd-cpu-mask'])
    else:
        pmd_cores = ''
    return pmd_cores
def required_vswitch_pmd_cores():
    """Return the `vswitch_pmd_cores` value of the worker role's platform profile.

    :return: vswitch_pmd_cores value expected by the PDF
    """
    role = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role)['vswitch_pmd_cores']
def trace_vswitch_dpdk_lcores():
"""
Trace vswitch_dpdk_lcores from Airship deployment
:return: value traced from `other_config:dpdk-lcore-mask` in
openvswitchdb using ovs-vsctl
"""
ovs_pod = get_pod_with_labels('application=openvswitch,component=openvswitch-vswitchd')
cmd = ['ovs-vsctl', '-t', '5', 'get', 'Open_vSwitch', '.', 'other_config']
response = kube_exec(ovs_pod, cmd)
# convert config str to json str
match = re.findall("[a-zA-Z0-9-]+=", response)
for key in match:
response = response.replace(key, '"' + key[:-1] + '":')
match = re.findall(":[a-zA-Z0-9-]+", response)
for key in match:
response = response.replace(key[1:], '"' + key[1:] + '"')
config = json.loads(response)
if 'dpdk-lcore-mask' in config:
pmd_cores = hex_to_comma_list(config['dpdk-lcore-mask'])
else:
pmd_cores = ''
return pmd_cores
def required_vswitch_dpdk_lcores():
    """
    Return the vswitch_dpdk_lcores value from the platform_profile that
    the worker-node Role references in the PDF.

    :return: vswitch_dpdk_lcores value expected by the PDF
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['vswitch_dpdk_lcores']
def trace_os_reserved_cores():
    """
    Trace os_reserved_cores from Airship deployment

    os_reserved_cores = all_cores - (reserved_vnf_cores +
                                     vswitch_pmd_cores +
                                     vswitch_dpdk_lcores)

    :return: comma separated core list as a string
    """
    worker_role = settings.getValue('WORKER_ROLE_NAME')
    all_cores = get_cores_by_role(worker_role)
    non_os_cores = []
    non_os_cores.extend(convert_range_to_list(trace_reserved_vnf_cores()))
    non_os_cores.extend(convert_range_to_list(trace_vswitch_pmd_cores()))
    non_os_cores.extend(convert_range_to_list(trace_vswitch_dpdk_lcores()))
    os_reserved_cores = set(all_cores).difference(non_os_cores)
    # sort the cores before joining — set iteration order is not
    # guaranteed, so the original string was not deterministic
    return ','.join(map(str, sorted(os_reserved_cores)))
def required_os_reserved_cores():
    """
    Return the os_reserved_cores value from the platform_profile that
    the worker-node Role references in the PDF.

    :return: os_reserved_cores value expected by the PDF
    """
    role_name = settings.getValue('WORKER_ROLE_NAME')
    return get_platform_profile_by_role(role_name)['os_reserved_cores']
def trace_nova_scheduler_filters():
    """
    Trace scheduler_filters from Airship deployment

    :return: list of filter names from the `enabled_filters` key in
    nova.conf of the actual deployment
    """
    try:
        config = get_nova_conf()
        filters = config.get('filter_scheduler', 'enabled_filters')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        filters = ''
    # strip whitespace around each name; the original used a bare
    # `map(str.strip, filters)` whose lazy result was discarded, so the
    # names were never actually stripped
    return [name.strip() for name in filters.split(',')]
def required_nova_scheduler_filters():
    """
    Required nova scheduler_filters by the PDF

    :return: list of filter names declared in the PDF
    """
    pdf = settings.getValue('pdf_file')
    filters = pdf['vim_functional']['scheduler_filters']
    # strip whitespace around each name; the original used a bare
    # `map(str.strip, filters)` whose lazy result was discarded
    return [name.strip() for name in filters.split(',')]
def trace_cpu_allocation_ratio():
    """
    Trace cpu_allocation_ratio from Airship deployment

    :return: float value of the `cpu_allocation_ratio` key in nova.conf
    of the actual deployment, or 0.0 when the option is absent
    """
    try:
        config = get_nova_conf()
        cpu_allocation_ratio = config.get('DEFAULT', 'cpu_allocation_ratio')
    except (configparser.NoOptionError, configparser.MissingSectionHeaderError):
        # the original fell through to float('') here, which raises
        # ValueError; report 0.0 for a missing option instead
        return 0.0
    return float(cpu_allocation_ratio)
def required_cpu_allocation_ratio():
    """
    Required cpu_allocation_ratio by the PDF

    :return: cpu_allocation_ratio from the PDF as float
    """
    pdf = settings.getValue('pdf_file')
    return float(pdf['vim_functional']['cpu_allocation_ratio'])
def get_role(role_name):
    """
    Searches and returns role with `role_name`

    :return: the matching role dict, or None when no role matches
    """
    roles = settings.getValue('pdf_file')['roles']
    for role in roles:
        if role['name'] == role_name:
            return role
    # the original raised UnboundLocalError on a miss; None makes the
    # "not found" case explicit
    return None
def get_platform_profile(profile_name):
    """
    Searches and returns platform_profile with `profile_name`

    :return: the matching profile dict, or None when no profile matches
    """
    platform_profiles = settings.getValue('pdf_file')['platform_profiles']
    for profile in platform_profiles:
        if profile['profile_name'] == profile_name:
            return profile
    # the original raised UnboundLocalError on a miss; None makes the
    # "not found" case explicit
    return None
def get_processor_profile(profile_name):
    """
    Searches and returns processor_profile with `profile_name`

    :return: the matching profile dict, or None when no profile matches
    """
    processor_profiles = settings.getValue('pdf_file')['processor_profiles']
    for profile in processor_profiles:
        if profile['profile_name'] == profile_name:
            return profile
    # the original raised UnboundLocalError on a miss; None makes the
    # "not found" case explicit
    return None
def get_platform_profile_by_role(role_name):
    """
    Returns platform profile details of a role
    """
    role = get_role(role_name)
    return get_platform_profile(role['platform_profile'])
def get_hardware_profile_by_role(role_name):
    """
    Returns hardware profile details of a role

    :return: the matching hardware-profile dict, or None when no
    profile matches the role's hardware_profile name
    """
    role = get_role(role_name)
    hardware_profiles = settings.getValue('pdf_file')['hardware_profiles']
    for profile in hardware_profiles:
        if profile['profile_name'] == role['hardware_profile']:
            return profile
    # the original raised UnboundLocalError on a miss; None makes the
    # "not found" case explicit
    return None
def get_cores_by_role(role_name):
    """
    Returns cpu cores list of server hardware used in the role
    """
    hw_profile = get_hardware_profile_by_role(role_name)
    proc_name = hw_profile['profile_info']['processor_profile']
    proc_profile = get_processor_profile(proc_name)
    cores = []
    for numa in proc_profile['profile_info']['numas']:
        cores.extend(convert_range_to_list(numa['cpu_set']))
    return cores
def get_nova_conf():
    """
    Returns parsed nova.conf

    Reads /etc/nova/nova.conf out of the nova compute pod and parses it.
    """
    compute_pod = get_pod_with_labels('application=nova,component=compute')
    raw_conf = kube_exec(compute_pod, ['cat', '/etc/nova/nova.conf'])
    parser = configparser.ConfigParser()
    parser.read_string(raw_conf)
    return parser
### cpu cores related helper function
def convert_range_to_list(x):
    """
    Returns list of numbers from given range as string

    e.g.: convert_range_to_list('3-5') will give [3, 4, 5]
    """
    # pylint: disable=C0103
    result = []
    for chunk in x.split(','):
        if '-' in chunk:
            lo, hi = (int(edge) for edge in chunk.split('-'))
            result.extend(range(lo, hi + 1))
        elif chunk:
            result.append(int(chunk))
    # drop duplicates while keeping first-seen order
    return list(dict.fromkeys(result))
def is_ranges_equals(range1, range2):
    """
    Checks whether two ranges passed as string are equal

    e.g.: is_ranges_equals('2-5', '2-4,5') returns true
    """
    return (set(convert_range_to_list(range1))
            == set(convert_range_to_list(range2)))
def are_lists_equal(list1, list2):
    """
    Checks whether two lists hold the same set of elements
    (duplicates and ordering are ignored)
    """
    return set(list1) == set(list2)
def hex_to_comma_list(hex_mask):
    """
    Converts CPU mask given in hex to list of cores

    e.g. 'f' -> '0,1,2,3'
    """
    mask = int(hex_mask, 16)
    set_bits = [str(bit) for bit in range(mask.bit_length())
                if mask >> bit & 1]
    return ','.join(set_bits)
def comma_list_to_hex(cpus):
    """
    Converts a list of cpu cores in corresponding hex value
    of cpu-mask

    e.g. '0,1' -> '03'
    """
    mask = 0
    for core in cpus.split(','):
        mask |= 1 << int(core)
    # zero-padded, at least two hex digits
    return format(mask, '02x')
def split_key_value(key_value_str, delimiter='='):
    """
    splits given string into key and value based on delimiter

    :param key_value_str: example string `someKey=somevalue`
    :param delimiter: default delimiter is `=`
    :return: (key, value) with surrounding whitespace stripped
    """
    # split only on the first delimiter so a value that itself contains
    # the delimiter (e.g. `opt=a=b`) no longer raises ValueError
    key, value = key_value_str.split(delimiter, 1)
    return key.strip(), value.strip()
| 25.197015 | 91 | 0.65093 |
4c0f174360fe29201e22d16e102aa2c61bad20f2
| 262 |
py
|
Python
|
production/pygsl-0.9.5/testing/__init__.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2019-07-29T02:53:51.000Z
|
2019-07-29T02:53:51.000Z
|
production/pygsl-0.9.5/testing/__init__.py
|
juhnowski/FishingRod
|
457e7afb5cab424296dff95e1acf10ebf70d32a9
|
[
"MIT"
] | 1 |
2021-09-11T14:30:32.000Z
|
2021-09-11T14:30:32.000Z
|
Dockerfiles/gedlab-khmer-filter-abund/pymodules/python2.7/lib/python/pygsl/testing/__init__.py
|
poojavade/Genomics_Docker
|
829b5094bba18bbe03ae97daf925fee40a8476e8
|
[
"Apache-2.0"
] | 2 |
2016-12-19T02:27:46.000Z
|
2019-07-29T02:53:54.000Z
|
"""
Here you find either newly implemented modules or alternate implementations
of existing modules. This directory is intended to hold a second implementation
alongside the main implementation, so that a discussion can be had about which
implementation to favor in the long run.
"""
| 37.428571 | 78 | 0.80916 |
4c10bad25f060a9091150e60b188f22ceaae17b0
| 14,801 |
py
|
Python
|
PythonServer/UnitTestCasesForWebSocket.py
|
Cyberlightning/2D-3DCapture
|
e5fdcec4f25358fc1964068180e4e774f45daa8a
|
[
"Apache-2.0"
] | 2 |
2015-11-04T10:21:48.000Z
|
2016-03-07T15:14:35.000Z
|
2D-3D-Capture/PythonServer/UnitTestCasesForWebSocket.py
|
Cyberlightning/Cyber-WeX
|
11dc560b7a30eb31c1dfa18196f6a0760648f9a7
|
[
"Apache-2.0"
] | null | null | null |
2D-3D-Capture/PythonServer/UnitTestCasesForWebSocket.py
|
Cyberlightning/Cyber-WeX
|
11dc560b7a30eb31c1dfa18196f6a0760648f9a7
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Mar 6, 2014
@author: tharanga
'''
import unittest
from time import sleep
import EventService as es
from EventService import WebSocketServer as ws
from EventService import EventManager as em
import socket
from base64 import b64encode
import struct
import MySQLdb
import json
import EventService
import flaskr
import tempfile
##TO RUN THE FOLLOWING UNIT TESTS IT IS EXPECTED HAVE THE DATABASE
##CREATED. DATABASE SCRIPT IS PROVIDED TO CREATE THE NECESSARY DATABASES AND TABLES
##ASSISCIATED DATA IS NOT PROVIDED.
HOST = '127.0.0.1' # The remote host
PORT = 17322
suite = suite()
runner = unittest.TextTestRunner(verbosity=3)
runner.run(suite)
# if __name__ == "__main__":
# #import sys;sys.argv = ['', 'Test.testName']
# unittest.main()
| 57.368217 | 386 | 0.646375 |
4c1112a8d3df95d531441fb2f11172b25c1ca8ae
| 1,945 |
py
|
Python
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | 1 |
2021-03-18T16:20:46.000Z
|
2021-03-18T16:20:46.000Z
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | 2 |
2019-06-04T21:50:23.000Z
|
2019-06-14T13:20:50.000Z
|
src/tests/client_side/test_main.py
|
JulianSobott/OpenDrive
|
0593c994c3bccccc4351557c42d13f3535b6b6c1
|
[
"Apache-2.0"
] | null | null | null |
import os
import threading
import time
import unittest
from OpenDrive.client_side import file_changes_json as c_json
from OpenDrive.client_side import interface
from OpenDrive.client_side import main
from OpenDrive.client_side import paths as client_paths
from OpenDrive.server_side import paths as server_paths
from tests.client_side.helper_client import h_register_dummy_user_device_client
from tests.helper_all import h_client_routine, h_start_server_process, h_stop_server_process, \
h_clear_init_all_folders, h_create_empty
| 42.282609 | 108 | 0.748586 |
4c131d63691e04e79320d304f39d4fe881bda148
| 1,850 |
py
|
Python
|
site-packages/skimage/io/tests/test_io.py
|
oz90210/Pyto
|
59f185149b71e57e5debeb1c9a61a28739e81720
|
[
"MIT"
] | null | null | null |
site-packages/skimage/io/tests/test_io.py
|
oz90210/Pyto
|
59f185149b71e57e5debeb1c9a61a28739e81720
|
[
"MIT"
] | 1 |
2020-04-25T20:36:07.000Z
|
2020-04-25T20:36:07.000Z
|
site-packages/skimage/io/tests/test_io.py
|
Wristlebane/Pyto
|
901ac307b68486d8289105c159ca702318bea5b0
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
from skimage import io, data_dir
from skimage._shared import testing
from skimage._shared.testing import assert_array_equal
one_by_one_jpeg = (
b'\xff\xd8\xff\xe0\x00\x10JFIF\x00\x01\x01\x00\x00\x01'
b'\x00\x01\x00\x00\xff\xdb\x00C\x00\x03\x02\x02\x02\x02'
b'\x02\x03\x02\x02\x02\x03\x03\x03\x03\x04\x06\x04\x04'
b'\x04\x04\x04\x08\x06\x06\x05\x06\t\x08\n\n\t\x08\t\t'
b'\n\x0c\x0f\x0c\n\x0b\x0e\x0b\t\t\r\x11\r\x0e\x0f\x10'
b'\x10\x11\x10\n\x0c\x12\x13\x12\x10\x13\x0f\x10\x10'
b'\x10\xff\xc0\x00\x0b\x08\x00\x01\x00\x01\x01\x01\x11'
b'\x00\xff\xc4\x00\x14\x00\x01\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\t\xff\xc4\x00'
b'\x14\x10\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\xff\xda\x00\x08\x01\x01\x00'
b'\x00?\x00*\x9f\xff\xd9'
)
| 33.636364 | 70 | 0.677838 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.