/GmoCoin-0.0.13.zip/GmoCoin-0.0.13/gmocoin/public/dto.py

from marshmallow import fields, pre_load
from marshmallow_enum import EnumField
from enum import Enum
from datetime import datetime
from pytz import timezone
from typing import List
from decimal import Decimal

from ..common.dto import BaseSchema, BaseResponse, BaseResponseSchema, Status, Symbol, SalesSide


class GetStatusData:
    """
    Exchange operating status data class.
    """

    def __init__(self, status: Status) -> None:
        """
        Constructor.

        Args:
            status:
                Sets the status code.
        """
        self.status = status


class GetStatusDataSchema(BaseSchema):
    """
    Exchange operating status data schema class.
    """
    __model__ = GetStatusData

    status = EnumField(Status, data_key='status')


class GetStatusRes(BaseResponse):
    """
    Exchange operating status response class.
    """

    def __init__(self, status: int, responsetime: datetime, data: GetStatusData) -> None:
        """
        Constructor.

        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
        """
        super().__init__(status, responsetime)
        self.data = data


class GetStatusResSchema(BaseResponseSchema):
    """
    Exchange operating status response schema class.
    """
    __model__ = GetStatusRes

    data = fields.Nested(GetStatusDataSchema, data_key='data')


class GetTickerData:
    """
    Latest symbol rate (ticker) data class.
    """

    def __init__(self, symbol: Symbol, timestamp: datetime, volume: Decimal, ask: Decimal,
                 bid: Decimal, high: Decimal, last: Decimal, low: Decimal) -> None:
        """
        Constructor.

        Args:
            ask:
                Sets the ask price.
            bid:
                Sets the bid price.
            high:
                Sets the day's high price.
            last:
                Sets the last traded price.
            low:
                Sets the day's low price.
            symbol:
                Sets the symbol.
            timestamp:
                Sets the quote timestamp.
            volume:
                Sets the trading volume.
        """
        self.ask = ask
        self.bid = bid
        self.high = high
        self.last = last
        self.low = low
        self.symbol = symbol
        # Convert the timestamp to Japan Standard Time.
        self.timestamp = timestamp.astimezone(timezone('Asia/Tokyo'))
        self.volume = volume


class GetTickerDataSchema(BaseSchema):
    """
    Latest symbol rate (ticker) data schema class.
    """
    __model__ = GetTickerData

    ask = fields.Decimal(data_key='ask')
    bid = fields.Decimal(data_key='bid')
    high = fields.Decimal(data_key='high')
    last = fields.Decimal(data_key='last')
    low = fields.Decimal(data_key='low')
    symbol = EnumField(Symbol, data_key='symbol')
    timestamp = fields.DateTime(format='%Y-%m-%dT%H:%M:%S.%fZ', data_key='timestamp')
    volume = fields.Decimal(data_key='volume')

    @pre_load
    def convert_none_to_zero(self, in_data, **kwargs):
        """
        Converts None price fields to 0 before deserialization.

        Args:
            in_data:
                The raw input data.
            kwargs:
                Extra marshmallow hook arguments.

        Returns:
            in_data
        """
        for key in ['ask', 'bid', 'high', 'last', 'low']:
            if in_data[key] is None:
                in_data[key] = 0
        return in_data


class GetTickerRes(BaseResponse):
    """
    Latest symbol rate (ticker) response class.
    """

    def __init__(self, status: int, responsetime: datetime, data: List[GetTickerData]) -> None:
        """
        Constructor.

        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
        """
        super().__init__(status, responsetime)
        self.data = data


class GetTickerResSchema(BaseResponseSchema):
    """
    Latest symbol rate (ticker) response schema class.
    """
    __model__ = GetTickerRes

    data = fields.Nested(GetTickerDataSchema, data_key='data', many=True)


class OrderData:
    """
    Order data class.
    """

    def __init__(self, price: Decimal, size: Decimal) -> None:
        """
        Constructor.

        Args:
            price:
                Sets the order price.
            size:
                Sets the order size.
        """
        self.price = price
        self.size = size


class OrderDataSchema(BaseSchema):
    """
    Order data schema class.
    """
    __model__ = OrderData

    price = fields.Decimal(data_key='price')
    size = fields.Decimal(data_key='size')


class GetOrderBooksData:
    """
    Order book data class.
    """

    def __init__(self, asks: List[OrderData], bids: List[OrderData], symbol: Symbol) -> None:
        """
        Constructor.

        Args:
            asks:
                Sets the sell (ask) orders.
            bids:
                Sets the buy (bid) orders.
            symbol:
                Sets the symbol.
        """
        self.asks = asks
        self.bids = bids
        self.symbol = symbol


class GetOrderBooksDataSchema(BaseSchema):
    """
    Order book data schema class.
    """
    __model__ = GetOrderBooksData

    asks = fields.Nested(OrderDataSchema, data_key='asks', many=True)
    bids = fields.Nested(OrderDataSchema, data_key='bids', many=True)
    symbol = EnumField(Symbol, data_key='symbol')


class GetOrderBooksRes(BaseResponse):
    """
    Order book response class.
    """

    def __init__(self, status: int, responsetime: datetime, data: GetOrderBooksData) -> None:
        """
        Constructor.

        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
        """
        super().__init__(status, responsetime)
        self.data = data


class GetOrderBooksResSchema(BaseResponseSchema):
    """
    Order book response schema class.
    """
    __model__ = GetOrderBooksRes

    data = fields.Nested(GetOrderBooksDataSchema, data_key='data')


class TradesPagenation:
    """
    Trade pagination data class.
    """

    def __init__(self, current_page: int, count: int) -> None:
        """
        Constructor.

        Args:
            current_page:
                Sets the current page number.
            count:
                Sets the number of items.
        """
        self.current_page = current_page
        self.count = count


class TradesPagenationSchema(BaseSchema):
    """
    Trade pagination data schema class.
    """
    __model__ = TradesPagenation

    current_page = fields.Int(data_key='currentPage')
    count = fields.Int(data_key='count')


class Trade:
    """
    Trade data class.
    """

    def __init__(self, price: Decimal, side: SalesSide, size: Decimal, timestamp: datetime) -> None:
        """
        Constructor.

        Args:
            price:
                Sets the trade price.
            side:
                Sets the trade side (buy/sell).
            size:
                Sets the trade size.
            timestamp:
                Sets the trade timestamp.
        """
        self.price = price
        self.side = side
        self.size = size
        # Convert the timestamp to Japan Standard Time.
        self.timestamp = timestamp.astimezone(timezone('Asia/Tokyo'))


class TradeSchema(BaseSchema):
    """
    Trade data schema class.
    """
    __model__ = Trade

    price = fields.Decimal(data_key='price')
    side = EnumField(SalesSide, data_key='side')
    size = fields.Decimal(data_key='size')
    timestamp = fields.DateTime(format='%Y-%m-%dT%H:%M:%S.%fZ', data_key='timestamp')


class GetTradesData:
    """
    Trade history data class.
    """

    def __init__(self, pagination: TradesPagenation, trades: List[Trade]) -> None:
        """
        Constructor.

        Args:
            pagination:
                Sets the pagination info.
            trades:
                Sets the list of trades.
        """
        self.pagination = pagination
        self.trades = trades


class GetTradesDataSchema(BaseSchema):
    """
    Trade history data schema class.
    """
    __model__ = GetTradesData

    pagination = fields.Nested(TradesPagenationSchema, data_key='pagination')
    trades = fields.Nested(TradeSchema, data_key='list', many=True)


class GetTradesRes(BaseResponse):
    """
    Trade history response class.
    """

    def __init__(self, status: int, responsetime: datetime, data: GetTradesData) -> None:
        """
        Constructor.

        Args:
            status:
                Sets the status code.
            responsetime:
                Sets the response time.
            data:
                Sets the response data.
        """
        super().__init__(status, responsetime)
        self.data = data


class GetTradesResSchema(BaseResponseSchema):
    """
    Trade history response schema class.
    """
    __model__ = GetTradesRes

    data = fields.Nested(GetTradesDataSchema, data_key='data')
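
# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the original module. Assumptions: the
# payload shape mirrors the GMO Coin public /v1/status response, BaseSchema's
# post_load hook builds the __model__ instance, and marshmallow 3.x semantics
# apply (2.x would return an UnmarshalResult whose .data holds the object).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    payload = {
        'status': 0,
        'responsetime': '2021-01-01T00:00:00.000Z',
        'data': {'status': 'OPEN'},  # 'OPEN' is an assumed Status enum value
    }
    res = GetStatusResSchema().load(payload)
    print(res.status, res.data.status)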

/NBT-1.5.1.tar.gz/NBT-1.5.1/README.txt

==========================
The NBT library for Python
==========================
Foreword
=========
This is mainly a `Named Binary Tag` parser & writer library.

From the initial specification by Markus Persson::

    NBT (Named Binary Tag) is a tag based binary format designed to carry large
    amounts of binary data with smaller amounts of additional data.
    An NBT file consists of a single GZIPped Named Tag of type TAG_Compound.
The current specification is on the official `Minecraft Wiki <https://minecraft.gamepedia.com/NBT_format>`_.
This library is well suited to inspecting & editing Minecraft data files. The
provided examples demonstrate how to:

- get player and world statistics,
- list mobs, chest contents, biomes,
- draw a simple world map,
- etc.
.. image:: world.png
*Note: The examples are here only to help with using and testing the library.
Developing Minecraft tools is out of the scope of this project.*
Status
======
The library supports all the currently known tag types (including the arrays
of 'Integer' and 'Long'), and the examples work with the McRegion,
pre-"flattened" and "flattened" Anvil formats.
Last update was tested on Minecraft version **1.13.2**.
Dependencies
============
The library, the tests and the examples use only the Python core library,
except `curl`, which is used to download some test reference data, and `PIL`
(Python Imaging Library), which is used by the `map` example.
Supported Python releases: 2.7, 3.4 to 3.7
Usage
=====
Reading files
-------------
The easiest way to read an NBT file is to instantiate an NBTFile object, e.g.::

    >>> from nbt import nbt
    >>> nbtfile = nbt.NBTFile("bigtest.nbt", 'rb')
    >>> nbtfile.name
    u'Level'
    >>> nbtfile["nested compound test"].tag_info()
    TAG_Compound("nested compound test"): 2 Entries
    >>> for tag in nbtfile["nested compound test"]["ham"].tags:
    ...     print(tag.tag_info())
    ...
    TAG_String("name"): Hampus
    TAG_Float("value"): 0.75
    >>> [tag.value for tag in nbtfile["listTest (long)"].value]
    [11, 12, 13, 14, 15]
Files can also be read from a fileobj (a file-like object containing a compressed
stream) or a buffer (a file-like object containing an uncompressed stream of NBT
tags), like so::

    >>> from nbt.nbt import *
    >>> nbtfile = NBTFile(fileobj=previously_opened_file)
    # or....
    >>> nbtfile = NBTFile(buffer=net_socket.makefile())
Writing files
-------------
Writing files is easy too! If you have an NBTFile object, simply call its
write_file() method. If the NBTFile was instantiated with a filename, then
write_file needs no extra arguments. It just works. If however you created a new
file object from scratch (or even if you just want to save it somewhere else)
call write_file('path/to/new/file.nbt')::

    >>> from nbt import nbt
    >>> nbtfile = nbt.NBTFile("bigtest.nbt", 'rb')
    >>> nbtfile["listTest (compound)"].tags[0]["name"].value = "Different name"
    >>> nbtfile.write_file("newnbtfile.nbt")

It is also possible to write to a buffer or fileobj using the same keyword args::

    >>> nbtfile.write_file(fileobj=my_file)   # compressed
    >>> nbtfile.write_file(buffer=sock.makefile())   # uncompressed
Creating files
--------------
Creating files is trickier, but ultimately should give you no issue, as long as
you have read the NBT spec (hint: it's very short). Also be sure to note that
the NBTFile object is actually a TAG_Compound with some wrapper features, so
you can use all the standard tag features::

    >>> from nbt.nbt import *
    >>> nbtfile = NBTFile()

First, don't forget to name the top level tag::

    >>> nbtfile.name = "My Top Level Tag"
    >>> nbtfile.tags.append(TAG_Float(name="My Float Name", value=3.152987593947))
    >>> mylist = TAG_List(name="TestList", type=TAG_Long) # type needs to be pre-declared!
    >>> mylist.tags.append(TAG_Long(100))
    >>> mylist.tags.extend([TAG_Long(120), TAG_Long(320), TAG_Long(19)])
    >>> nbtfile.tags.append(mylist)
    >>> print(nbtfile.pretty_tree())
    TAG_Compound("My Top Level Tag"): 2 Entries
    {
        TAG_Float("My Float Name"): 3.15298759395
        TAG_List("TestList"): 4 entries of type TAG_Long
        {
            TAG_Long: 100
            TAG_Long: 120
            TAG_Long: 320
            TAG_Long: 19
        }
    }
    >>> nbtfile["TestList"].tags.sort(key=lambda tag: tag.value)
    >>> print(nbtfile.pretty_tree())
    TAG_Compound("My Top Level Tag"): 2 Entries
    {
        TAG_Float("My Float Name"): 3.15298759395
        TAG_List("TestList"): 4 entries of type TAG_Long
        {
            TAG_Long: 19
            TAG_Long: 100
            TAG_Long: 120
            TAG_Long: 320
        }
    }
    >>> nbtfile.write_file("mynbt.dat")

/OASYS1-SHADOWFOUR-0.0.4.tar.gz/OASYS1-SHADOWFOUR-0.0.4/orangecontrib/shadow4/widgets/gui/plots.py

from oasys.widgets import gui as oasysgui


def plot_data1D(x, y,
                title="", xtitle="", ytitle="",
                log_x=False, log_y=False, color='blue', replace=True, control=False,
                xrange=None, yrange=None, symbol=''):
    plot_widget_id = oasysgui.plotWindow(parent=None,
                                         backend=None,
                                         resetzoom=True,
                                         autoScale=False,
                                         logScale=True,
                                         grid=True,
                                         curveStyle=True,
                                         colormap=False,
                                         aspectRatio=False,
                                         yInverted=False,
                                         copy=True,
                                         save=True,
                                         print_=True,
                                         control=control,
                                         position=True,
                                         roi=False,
                                         mask=False,
                                         fit=False)

    plot_widget_id.setDefaultPlotLines(True)
    plot_widget_id.setActiveCurveColor(color='blue')
    plot_widget_id.setGraphXLabel(xtitle)
    plot_widget_id.setGraphYLabel(ytitle)
    # Available symbol markers include '+', '^' and ','.
    plot_widget_id.addCurve(x, y, title, symbol=symbol, color=color, xlabel=xtitle, ylabel=ytitle, replace=replace)

    if xtitle is not None: plot_widget_id.setGraphXLabel(xtitle)
    if ytitle is not None: plot_widget_id.setGraphYLabel(ytitle)
    if title is not None: plot_widget_id.setGraphTitle(title)

    plot_widget_id.resetZoom()
    plot_widget_id.replot()
    plot_widget_id.setActiveCurve(title)
    plot_widget_id.setXAxisLogarithmic(log_x)
    plot_widget_id.setYAxisLogarithmic(log_y)

    if xrange is not None:
        plot_widget_id.setGraphXLimits(xrange[0], xrange[1])
    if yrange is not None:
        plot_widget_id.setGraphYLimits(yrange[0], yrange[1])
    else:
        # Autoscale the Y axis only when no explicit range was given.
        if min(y) < 0:
            if log_y:
                plot_widget_id.setGraphYLimits(min(y) * 1.2, max(y) * 1.2)
            else:
                plot_widget_id.setGraphYLimits(min(y) * 1.01, max(y) * 1.01)
        else:
            if log_y:
                plot_widget_id.setGraphYLimits(min(y), max(y) * 1.2)
            else:
                plot_widget_id.setGraphYLimits(min(y) * 0.99, max(y) * 1.01)

    return plot_widget_id
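
# ---------------------------------------------------------------------------
# A minimal usage sketch, not part of the original module. Assumptions: the
# oasysgui.plotWindow widget is silx-based, so silx's qt wrapper can supply
# the QApplication event loop required when running outside of OASYS.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy
    from silx.gui import qt

    app = qt.QApplication([])
    x = numpy.linspace(0.0, 10.0, 200)
    y = numpy.sin(x) ** 2
    widget = plot_data1D(x, y, title="sin^2(x)", xtitle="x", ytitle="intensity")
    widget.show()
    app.exec_()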

/DjangoDjangoAppCenter-0.0.11-py3-none-any.whl/DjangoAppCenter/simpleui/static/admin/simpleui-x/elementui/calendar.js

module.exports =
/******/ (function (modules) { // webpackBootstrap
/******/ // The module cache
/******/
var installedModules = {};
/******/
/******/ // The require function
/******/
function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/
if (installedModules[moduleId]) {
/******/
return installedModules[moduleId].exports;
/******/
}
/******/ // Create a new module (and put it into the cache)
/******/
var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/
};
/******/
/******/ // Execute the module function
/******/
modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/
module.l = true;
/******/
/******/ // Return the exports of the module
/******/
return module.exports;
/******/
}
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/
__webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/
__webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/
__webpack_require__.d = function (exports, name, getter) {
/******/
if (!__webpack_require__.o(exports, name)) {
/******/
Object.defineProperty(exports, name, {enumerable: true, get: getter});
/******/
}
/******/
};
/******/
/******/ // define __esModule on exports
/******/
__webpack_require__.r = function (exports) {
/******/
if (typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/
Object.defineProperty(exports, Symbol.toStringTag, {value: 'Module'});
/******/
}
/******/
Object.defineProperty(exports, '__esModule', {value: true});
/******/
};
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/
__webpack_require__.t = function (value, mode) {
/******/
if (mode & 1) value = __webpack_require__(value);
/******/
if (mode & 8) return value;
/******/
if ((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/
var ns = Object.create(null);
/******/
__webpack_require__.r(ns);
/******/
Object.defineProperty(ns, 'default', {enumerable: true, value: value});
/******/
if (mode & 2 && typeof value != 'string') for (var key in value) __webpack_require__.d(ns, key, function (key) {
return value[key];
}.bind(null, key));
/******/
return ns;
/******/
};
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/
__webpack_require__.n = function (module) {
/******/
var getter = module && module.__esModule ?
/******/ function getDefault() {
return module['default'];
} :
/******/ function getModuleExports() {
return module;
};
/******/
__webpack_require__.d(getter, 'a', getter);
/******/
return getter;
/******/
};
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/
__webpack_require__.o = function (object, property) {
return Object.prototype.hasOwnProperty.call(object, property);
};
/******/
/******/ // __webpack_public_path__
/******/
__webpack_require__.p = "/dist/";
/******/
/******/
/******/ // Load entry module and return exports
/******/
return __webpack_require__(__webpack_require__.s = 66);
/******/
})
/************************************************************************/
/******/({
/***/ 0:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */
__webpack_require__.d(__webpack_exports__, "a", function () {
return normalizeComponent;
});
/* globals __VUE_SSR_CONTEXT__ */
// IMPORTANT: Do NOT use ES2015 features in this file (except for modules).
// This module is a runtime utility for cleaner component module output and will
// be included in the final webpack user bundle.
function normalizeComponent(
scriptExports,
render,
staticRenderFns,
functionalTemplate,
injectStyles,
scopeId,
moduleIdentifier, /* server only */
shadowMode /* vue-cli only */
) {
// Vue.extend constructor export interop
var options = typeof scriptExports === 'function'
? scriptExports.options
: scriptExports
// render functions
if (render) {
options.render = render
options.staticRenderFns = staticRenderFns
options._compiled = true
}
// functional template
if (functionalTemplate) {
options.functional = true
}
// scopedId
if (scopeId) {
options._scopeId = 'data-v-' + scopeId
}
var hook
if (moduleIdentifier) { // server build
hook = function (context) {
// 2.3 injection
context =
context || // cached call
(this.$vnode && this.$vnode.ssrContext) || // stateful
(this.parent && this.parent.$vnode && this.parent.$vnode.ssrContext) // functional
// 2.2 with runInNewContext: true
if (!context && typeof __VUE_SSR_CONTEXT__ !== 'undefined') {
context = __VUE_SSR_CONTEXT__
}
// inject component styles
if (injectStyles) {
injectStyles.call(this, context)
}
// register component module identifier for async chunk inferrence
if (context && context._registeredComponents) {
context._registeredComponents.add(moduleIdentifier)
}
}
// used by ssr in case component is cached and beforeCreate
// never gets called
options._ssrRegister = hook
} else if (injectStyles) {
hook = shadowMode
? function () {
injectStyles.call(this, this.$root.$options.shadowRoot)
}
: injectStyles
}
if (hook) {
if (options.functional) {
// for template-only hot-reload because in that case the render fn doesn't
// go through the normalizer
options._injectStyles = hook
// register for functioal component in vue file
var originalRender = options.render
options.render = function renderWithStyleInjection(h, context) {
hook.call(context)
return originalRender(h, context)
}
} else {
// inject component registration as beforeCreate hook
var existing = options.beforeCreate
options.beforeCreate = existing
? [].concat(existing, hook)
: [hook]
}
}
return {
exports: scriptExports,
options: options
}
}
/***/
}),
/***/ 1:
/***/ (function (module, exports) {
module.exports = require("element-ui/lib/utils/date-util");
/***/
}),
/***/ 24:
/***/ (function (module, exports) {
module.exports = require("element-ui/lib/utils/date");
/***/
}),
/***/ 6:
/***/ (function (module, exports) {
module.exports = require("element-ui/lib/mixins/locale");
/***/
}),
/***/ 66:
/***/ (function (module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
// CONCATENATED MODULE: ./node_modules/[email protected]@vue-loader/lib/loaders/templateLoader.js??vue-loader-options!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/calendar/src/main.vue?vue&type=template&id=6d9756be&
var render = function () {
var _vm = this
var _h = _vm.$createElement
var _c = _vm._self._c || _h
return _c("div", {staticClass: "el-calendar"}, [
_c("div", {staticClass: "el-calendar__header"}, [
_c("div", {staticClass: "el-calendar__title"}, [
_vm._v("\n " + _vm._s(_vm.i18nDate) + "\n ")
]),
_vm.validatedRange.length === 0
? _c(
"div",
{staticClass: "el-calendar__button-group"},
[
_c(
"el-button-group",
[
_c(
"el-button",
{
attrs: {type: "plain", size: "mini"},
on: {
click: function ($event) {
_vm.selectDate("prev-month")
}
}
},
[
_vm._v(
"\n " +
_vm._s(_vm.t("el.datepicker.prevMonth")) +
"\n "
)
]
),
_c(
"el-button",
{
attrs: {type: "plain", size: "mini"},
on: {
click: function ($event) {
_vm.selectDate("today")
}
}
},
[
_vm._v(
"\n " +
_vm._s(_vm.t("el.datepicker.today")) +
"\n "
)
]
),
_c(
"el-button",
{
attrs: {type: "plain", size: "mini"},
on: {
click: function ($event) {
_vm.selectDate("next-month")
}
}
},
[
_vm._v(
"\n " +
_vm._s(_vm.t("el.datepicker.nextMonth")) +
"\n "
)
]
)
],
1
)
],
1
)
: _vm._e()
]),
_vm.validatedRange.length === 0
? _c(
"div",
{key: "no-range", staticClass: "el-calendar__body"},
[
_c("date-table", {
attrs: {
date: _vm.date,
"selected-day": _vm.realSelectedDay,
"first-day-of-week": _vm.realFirstDayOfWeek
},
on: {pick: _vm.pickDay}
})
],
1
)
: _c(
"div",
{key: "has-range", staticClass: "el-calendar__body"},
_vm._l(_vm.validatedRange, function (range, index) {
return _c("date-table", {
key: index,
attrs: {
date: range[0],
"selected-day": _vm.realSelectedDay,
range: range,
"hide-header": index !== 0,
"first-day-of-week": _vm.realFirstDayOfWeek
},
on: {pick: _vm.pickDay}
})
}),
1
)
])
}
var staticRenderFns = []
render._withStripped = true
// CONCATENATED MODULE: ./packages/calendar/src/main.vue?vue&type=template&id=6d9756be&
// EXTERNAL MODULE: external "element-ui/lib/mixins/locale"
var locale_ = __webpack_require__(6);
var locale_default = /*#__PURE__*/__webpack_require__.n(locale_);
// EXTERNAL MODULE: external "element-ui/lib/utils/date"
var date_ = __webpack_require__(24);
var date_default = /*#__PURE__*/__webpack_require__.n(date_);
// EXTERNAL MODULE: external "element-ui/lib/utils/date-util"
var date_util_ = __webpack_require__(1);
// CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/calendar/src/date-table.vue?vue&type=script&lang=js&
var WEEK_DAYS = Object(date_util_["getI18nSettings"])().dayNames;
/* harmony default export */
var date_tablevue_type_script_lang_js_ = ({
props: {
selectedDay: String, // formated date yyyy-MM-dd
range: {
type: Array,
validator: function validator(val) {
if (!(val && val.length)) return true;
var start = val[0],
end = val[1];
return Object(date_util_["validateRangeInOneMonth"])(start, end);
}
},
date: Date,
hideHeader: Boolean,
firstDayOfWeek: Number
},
inject: ['elCalendar'],
methods: {
toNestedArr: function toNestedArr(days) {
return Object(date_util_["range"])(days.length / 7).map(function (_, index) {
var start = index * 7;
return days.slice(start, start + 7);
});
},
getFormateDate: function getFormateDate(day, type) {
if (!day || ['prev', 'current', 'next'].indexOf(type) === -1) {
throw new Error('invalid day or type');
}
var prefix = this.curMonthDatePrefix;
if (type === 'prev') {
prefix = this.prevMonthDatePrefix;
} else if (type === 'next') {
prefix = this.nextMonthDatePrefix;
}
day = ('00' + day).slice(-2);
return prefix + '-' + day;
},
getCellClass: function getCellClass(_ref) {
var text = _ref.text,
type = _ref.type;
var classes = [type];
if (type === 'current') {
var date = this.getFormateDate(text, type);
if (date === this.selectedDay) {
classes.push('is-selected');
}
if (date === this.formatedToday) {
classes.push('is-today');
}
}
return classes;
},
pickDay: function pickDay(_ref2) {
var text = _ref2.text,
type = _ref2.type;
var date = this.getFormateDate(text, type);
this.$emit('pick', date);
},
cellRenderProxy: function cellRenderProxy(_ref3) {
var text = _ref3.text,
type = _ref3.type;
var h = this.$createElement;
var render = this.elCalendar.$scopedSlots.dateCell;
if (!render) return h('span', [text]);
var day = this.getFormateDate(text, type);
var date = new Date(day);
var data = {
isSelected: this.selectedDay === day,
type: type + '-month',
day: day
};
return render({date: date, data: data});
}
},
computed: {
prevMonthDatePrefix: function prevMonthDatePrefix() {
var temp = new Date(this.date.getTime());
temp.setDate(0);
return date_default.a.format(temp, 'yyyy-MM');
},
curMonthDatePrefix: function curMonthDatePrefix() {
return date_default.a.format(this.date, 'yyyy-MM');
},
nextMonthDatePrefix: function nextMonthDatePrefix() {
var temp = new Date(this.date.getFullYear(), this.date.getMonth() + 1, 1);
return date_default.a.format(temp, 'yyyy-MM');
},
formatedToday: function formatedToday() {
return this.elCalendar.formatedToday;
},
isInRange: function isInRange() {
return this.range && this.range.length;
},
rows: function rows() {
var days = [];
// if range exists, should render days in range.
if (this.isInRange) {
var _range = this.range,
start = _range[0],
end = _range[1];
var currentMonthRange = Object(date_util_["range"])(end.getDate() - start.getDate() + 1).map(function (_, index) {
return {
text: start.getDate() + index,
type: 'current'
};
});
var remaining = currentMonthRange.length % 7;
remaining = remaining === 0 ? 0 : 7 - remaining;
var nextMonthRange = Object(date_util_["range"])(remaining).map(function (_, index) {
return {
text: index + 1,
type: 'next'
};
});
days = currentMonthRange.concat(nextMonthRange);
} else {
var date = this.date;
var firstDay = Object(date_util_["getFirstDayOfMonth"])(date);
firstDay = firstDay === 0 ? 7 : firstDay;
var firstDayOfWeek = typeof this.firstDayOfWeek === 'number' ? this.firstDayOfWeek : 1;
var prevMonthDays = Object(date_util_["getPrevMonthLastDays"])(date, firstDay - firstDayOfWeek).map(function (day) {
return {
text: day,
type: 'prev'
};
});
var currentMonthDays = Object(date_util_["getMonthDays"])(date).map(function (day) {
return {
text: day,
type: 'current'
};
});
days = [].concat(prevMonthDays, currentMonthDays);
var nextMonthDays = Object(date_util_["range"])(42 - days.length).map(function (_, index) {
return {
text: index + 1,
type: 'next'
};
});
days = days.concat(nextMonthDays);
}
return this.toNestedArr(days);
},
weekDays: function weekDays() {
var start = this.firstDayOfWeek;
if (typeof start !== 'number' || start === 0) {
return WEEK_DAYS.slice();
} else {
return WEEK_DAYS.slice(start).concat(WEEK_DAYS.slice(0, start));
}
}
},
render: function render() {
var _this = this;
var h = arguments[0];
var thead = this.hideHeader ? null : h('thead', [this.weekDays.map(function (day) {
return h(
'th',
{key: day},
[day]
);
})]);
return h(
'table',
{
'class': {
'el-calendar-table': true,
'is-range': this.isInRange
},
attrs: {
cellspacing: '0',
cellpadding: '0'
}
},
[thead, h('tbody', [this.rows.map(function (row, index) {
return h(
'tr',
{
'class': {
'el-calendar-table__row': true,
'el-calendar-table__row--hide-border': index === 0 && _this.hideHeader
},
key: index
},
[row.map(function (cell, key) {
return h(
'td',
{
key: key,
'class': _this.getCellClass(cell),
on: {
'click': _this.pickDay.bind(_this, cell)
}
},
[h(
'div',
{'class': 'el-calendar-day'},
[_this.cellRenderProxy(cell)]
)]
);
})]
);
})])]
);
}
});
// CONCATENATED MODULE: ./packages/calendar/src/date-table.vue?vue&type=script&lang=js&
/* harmony default export */
var src_date_tablevue_type_script_lang_js_ = (date_tablevue_type_script_lang_js_);
// EXTERNAL MODULE: ./node_modules/[email protected]@vue-loader/lib/runtime/componentNormalizer.js
var componentNormalizer = __webpack_require__(0);
// CONCATENATED MODULE: ./packages/calendar/src/date-table.vue
var date_table_render, date_table_staticRenderFns
/* normalize component */
var component = Object(componentNormalizer["a" /* default */])(
src_date_tablevue_type_script_lang_js_,
date_table_render,
date_table_staticRenderFns,
false,
null,
null,
null
)
/* hot reload */
if (false) {
var api;
}
component.options.__file = "packages/calendar/src/date-table.vue"
/* harmony default export */
var date_table = (component.exports);
// CONCATENATED MODULE: ./node_modules/[email protected]@babel-loader/lib!./node_modules/[email protected]@vue-loader/lib??vue-loader-options!./packages/calendar/src/main.vue?vue&type=script&lang=js&
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
var validTypes = ['prev-month', 'today', 'next-month'];
var weekDays = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday'];
var oneDay = 86400000;
/* harmony default export */
var mainvue_type_script_lang_js_ = ({
name: 'ElCalendar',
mixins: [locale_default.a],
components: {
DateTable: date_table
},
props: {
value: [Date, String, Number],
range: {
type: Array,
validator: function validator(range) {
if (Array.isArray(range)) {
return range.length === 2 && range.every(function (item) {
return typeof item === 'string' || typeof item === 'number' || item instanceof Date;
});
} else {
return true;
}
}
},
firstDayOfWeek: {
type: Number,
default: 1
}
},
provide: function provide() {
return {
elCalendar: this
};
},
methods: {
pickDay: function pickDay(day) {
this.realSelectedDay = day;
},
selectDate: function selectDate(type) {
if (validTypes.indexOf(type) === -1) {
throw new Error('invalid type ' + type);
}
var day = '';
if (type === 'prev-month') {
day = this.prevMonthDatePrefix + '-01';
} else if (type === 'next-month') {
day = this.nextMonthDatePrefix + '-01';
} else {
day = this.formatedToday;
}
if (day === this.formatedDate) return;
this.pickDay(day);
},
toDate: function toDate(val) {
if (!val) {
throw new Error('invalid val');
}
return val instanceof Date ? val : new Date(val);
},
rangeValidator: function rangeValidator(date, isStart) {
var firstDayOfWeek = this.realFirstDayOfWeek;
var expected = isStart ? firstDayOfWeek : firstDayOfWeek === 0 ? 6 : firstDayOfWeek - 1;
var message = (isStart ? 'start' : 'end') + ' of range should be ' + weekDays[expected] + '.';
if (date.getDay() !== expected) {
console.warn('[ElementCalendar]', message, 'Invalid range will be ignored.');
return false;
}
return true;
}
},
computed: {
prevMonthDatePrefix: function prevMonthDatePrefix() {
var temp = new Date(this.date.getTime());
temp.setDate(0);
return date_default.a.format(temp, 'yyyy-MM');
},
curMonthDatePrefix: function curMonthDatePrefix() {
return date_default.a.format(this.date, 'yyyy-MM');
},
nextMonthDatePrefix: function nextMonthDatePrefix() {
var temp = new Date(this.date.getFullYear(), this.date.getMonth() + 1, 1);
return date_default.a.format(temp, 'yyyy-MM');
},
formatedDate: function formatedDate() {
return date_default.a.format(this.date, 'yyyy-MM-dd');
},
i18nDate: function i18nDate() {
var year = this.date.getFullYear();
var month = this.date.getMonth() + 1;
return year + ' ' + this.t('el.datepicker.year') + ' ' + this.t('el.datepicker.month' + month);
},
formatedToday: function formatedToday() {
return date_default.a.format(this.now, 'yyyy-MM-dd');
},
realSelectedDay: {
get: function get() {
if (!this.value) return this.selectedDay;
return this.formatedDate;
},
set: function set(val) {
this.selectedDay = val;
var date = new Date(val);
this.$emit('input', date);
}
},
date: function date() {
if (!this.value) {
if (this.realSelectedDay) {
return new Date(this.selectedDay);
} else if (this.validatedRange.length) {
return this.validatedRange[0][0];
}
return this.now;
} else {
return this.toDate(this.value);
}
},
// if range is valid, we get a two-digit array
validatedRange: function validatedRange() {
var _this = this;
var range = this.range;
if (!range) return [];
range = range.reduce(function (prev, val, index) {
var date = _this.toDate(val);
if (_this.rangeValidator(date, index === 0)) {
prev = prev.concat(date);
}
return prev;
}, []);
if (range.length === 2) {
var _range = range,
start = _range[0],
end = _range[1];
if (start > end) {
console.warn('[ElementCalendar]end time should be greater than start time');
return [];
}
// start time and end time in one month
if (Object(date_util_["validateRangeInOneMonth"])(start, end)) {
return [[start, end]];
}
var data = [];
var startDay = new Date(start.getFullYear(), start.getMonth() + 1, 1);
var lastDay = this.toDate(startDay.getTime() - oneDay);
if (!Object(date_util_["validateRangeInOneMonth"])(startDay, end)) {
console.warn('[ElementCalendar]start time and end time interval must not exceed two months');
return [];
}
// 第一个月的时间范围
data.push([start, lastDay]);
// 下一月的时间范围,需要计算一下该月的第一个周起始日
var firstDayOfWeek = this.realFirstDayOfWeek;
var nextMontFirstDay = startDay.getDay();
var interval = 0;
if (nextMontFirstDay !== firstDayOfWeek) {
if (firstDayOfWeek === 0) {
interval = 7 - nextMontFirstDay;
} else {
interval = firstDayOfWeek - nextMontFirstDay;
interval = interval > 0 ? interval : 7 + interval;
}
}
startDay = this.toDate(startDay.getTime() + interval * oneDay);
if (startDay.getDate() < end.getDate()) {
data.push([startDay, end]);
}
return data;
}
return [];
},
realFirstDayOfWeek: function realFirstDayOfWeek() {
if (this.firstDayOfWeek < 1 || this.firstDayOfWeek > 6) {
return 0;
}
return Math.floor(this.firstDayOfWeek);
}
},
data: function data() {
return {
selectedDay: '',
now: new Date()
};
}
});
// CONCATENATED MODULE: ./packages/calendar/src/main.vue?vue&type=script&lang=js&
/* harmony default export */
var src_mainvue_type_script_lang_js_ = (mainvue_type_script_lang_js_);
// CONCATENATED MODULE: ./packages/calendar/src/main.vue
/* normalize component */
var main_component = Object(componentNormalizer["a" /* default */])(
src_mainvue_type_script_lang_js_,
render,
staticRenderFns,
false,
null,
null,
null
)
/* hot reload */
if (false) {
var main_api;
}
main_component.options.__file = "packages/calendar/src/main.vue"
/* harmony default export */
var main = (main_component.exports);
// CONCATENATED MODULE: ./packages/calendar/index.js
/* istanbul ignore next */
main.install = function (Vue) {
Vue.component(main.name, main);
};
/* harmony default export */
var calendar = __webpack_exports__["default"] = (main);
/***/
})
/******/
});

/LinOTP-2.11.1.tar.gz/LinOTP-2.11.1/linotp/public/js/jed.js

/*
-----------
A gettext compatible i18n library for modern JavaScript Applications
by Alex Sexton - AlexSexton [at] gmail - @SlexAxton
WTFPL license for use
Dojo CLA for contributions
Jed offers the entire applicable GNU gettext spec'd set of
functions, but also offers some nicer wrappers around them.
The api for gettext was written for a language with no function
overloading, so Jed allows a little more of that.
Many thanks to Joshua I. Miller - [email protected] - who wrote
gettext.js back in 2008. I was able to vet a lot of my ideas
against his. I also made sure Jed passed against his tests
in order to offer easy upgrades -- jsgettext.berlios.de
*/
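// ---------------------------------------------------------------------------
// A minimal usage sketch, kept in a comment so this vendored bundle is not
// changed at runtime. The locale_data shape mirrors the defaults defined
// below; with no translation loaded, the passed keys are used as fallbacks.
//
//   var i18n = new Jed({
//     "domain": "messages",
//     "locale_data": {
//       "messages": {
//         "": { "domain": "messages", "lang": "en",
//               "plural_forms": "nplurals=2; plural=(n != 1);" }
//       }
//     }
//   });
//   i18n.translate("%d file").ifPlural(3, "%d files").fetch(3); // -> "3 files"
// ---------------------------------------------------------------------------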
(function (root, undef) {
// Set up some underscore-style functions, if you already have
// underscore, feel free to delete this section, and use it
// directly, however, the amount of functions used doesn't
// warrant having underscore as a full dependency.
// Underscore 1.3.0 was used to port and is licensed
// under the MIT License by Jeremy Ashkenas.
var ArrayProto = Array.prototype,
ObjProto = Object.prototype,
slice = ArrayProto.slice,
hasOwnProp = ObjProto.hasOwnProperty,
nativeForEach = ArrayProto.forEach,
breaker = {};
// We're not using the OOP style _ so we don't need the
// extra level of indirection. This still means that you
// sub out for real `_` though.
var _ = {
forEach : function( obj, iterator, context ) {
var i, l, key;
if ( obj === null ) {
return;
}
if ( nativeForEach && obj.forEach === nativeForEach ) {
obj.forEach( iterator, context );
}
else if ( obj.length === +obj.length ) {
for ( i = 0, l = obj.length; i < l; i++ ) {
if ( i in obj && iterator.call( context, obj[i], i, obj ) === breaker ) {
return;
}
}
}
else {
for ( key in obj) {
if ( hasOwnProp.call( obj, key ) ) {
if ( iterator.call (context, obj[key], key, obj ) === breaker ) {
return;
}
}
}
}
},
extend : function( obj ) {
this.forEach( slice.call( arguments, 1 ), function ( source ) {
for ( var prop in source ) {
obj[prop] = source[prop];
}
});
return obj;
}
};
// END Miniature underscore impl
// Jed is a constructor function
var Jed = function ( options ) {
// Some minimal defaults
this.defaults = {
"locale_data" : {
"messages" : {
"" : {
"domain" : "messages",
"lang" : "en",
"plural_forms" : "nplurals=2; plural=(n != 1);"
}
// There are no default keys, though
}
},
// The default domain if one is missing
"domain" : "messages",
// enable debug mode to log untranslated strings to the console
"debug" : false
};
// Mix in the sent options with the default options
this.options = _.extend( {}, this.defaults, options );
this.textdomain( this.options.domain );
if ( options.domain && ! this.options.locale_data[ this.options.domain ] ) {
throw new Error('Text domain set to non-existent domain: `' + options.domain + '`');
}
};
// The gettext spec sets this character as the default
// delimiter for context lookups.
// e.g.: context\u0004key
// If your translation company uses something different,
// just change this at any time and it will use that instead.
Jed.context_delimiter = String.fromCharCode( 4 );
function getPluralFormFunc ( plural_form_string ) {
return Jed.PF.compile( plural_form_string || "nplurals=2; plural=(n != 1);");
}
function Chain( key, i18n ){
this._key = key;
this._i18n = i18n;
}
// Create a chainable api for adding args prettily
_.extend( Chain.prototype, {
onDomain : function ( domain ) {
this._domain = domain;
return this;
},
withContext : function ( context ) {
this._context = context;
return this;
},
ifPlural : function ( num, pkey ) {
this._val = num;
this._pkey = pkey;
return this;
},
fetch : function ( sArr ) {
if ( {}.toString.call( sArr ) != '[object Array]' ) {
sArr = [].slice.call(arguments, 0);
}
return ( sArr && sArr.length ? Jed.sprintf : function(x){ return x; } )(
this._i18n.dcnpgettext(this._domain, this._context, this._key, this._pkey, this._val),
sArr
);
}
});
// Add functions to the Jed prototype.
// These will be the functions on the object that's returned
// from creating a `new Jed()`
// These seem redundant, but they gzip pretty well.
_.extend( Jed.prototype, {
// The sexier api start point
translate : function ( key ) {
return new Chain( key, this );
},
textdomain : function ( domain ) {
if ( ! domain ) {
return this._textdomain;
}
this._textdomain = domain;
},
gettext : function ( key ) {
return this.dcnpgettext.call( this, undef, undef, key );
},
dgettext : function ( domain, key ) {
return this.dcnpgettext.call( this, domain, undef, key );
},
dcgettext : function ( domain , key /*, category */ ) {
// Ignores the category anyways
return this.dcnpgettext.call( this, domain, undef, key );
},
ngettext : function ( skey, pkey, val ) {
return this.dcnpgettext.call( this, undef, undef, skey, pkey, val );
},
dngettext : function ( domain, skey, pkey, val ) {
return this.dcnpgettext.call( this, domain, undef, skey, pkey, val );
},
dcngettext : function ( domain, skey, pkey, val/*, category */) {
return this.dcnpgettext.call( this, domain, undef, skey, pkey, val );
},
pgettext : function ( context, key ) {
return this.dcnpgettext.call( this, undef, context, key );
},
dpgettext : function ( domain, context, key ) {
return this.dcnpgettext.call( this, domain, context, key );
},
dcpgettext : function ( domain, context, key/*, category */) {
return this.dcnpgettext.call( this, domain, context, key );
},
npgettext : function ( context, skey, pkey, val ) {
return this.dcnpgettext.call( this, undef, context, skey, pkey, val );
},
dnpgettext : function ( domain, context, skey, pkey, val ) {
return this.dcnpgettext.call( this, domain, context, skey, pkey, val );
},
// The most fully qualified gettext function. It has every option.
// Since it has every option, we can use it from every other method.
// This is the bread and butter.
// Technically there should be one more argument in this function for 'Category',
// but since we never use it, we might as well not waste the bytes to define it.
dcnpgettext : function ( domain, context, singular_key, plural_key, val ) {
// Set some defaults
plural_key = plural_key || singular_key;
// Use the global domain default if one
// isn't explicitly passed in
domain = domain || this._textdomain;
var fallback;
// Handle special cases
// No options found
if ( ! this.options ) {
// There's likely something wrong, but we'll return the correct key for english
// We do this by instantiating a brand new Jed instance with the default set
// for everything that could be broken.
fallback = new Jed();
return fallback.dcnpgettext.call( fallback, undefined, undefined, singular_key, plural_key, val );
}
// No translation data provided
if ( ! this.options.locale_data ) {
throw new Error('No locale data provided.');
}
if ( ! this.options.locale_data[ domain ] ) {
throw new Error('Domain `' + domain + '` was not found.');
}
if ( ! this.options.locale_data[ domain ][ "" ] ) {
throw new Error('No locale meta information provided.');
}
// Make sure we have a truthy key. Otherwise we might start looking
// into the empty string key, which is the options for the locale
// data.
if ( ! singular_key ) {
throw new Error('No translation key found.');
}
var key = context ? context + Jed.context_delimiter + singular_key : singular_key,
locale_data = this.options.locale_data,
dict = locale_data[ domain ],
defaultConf = (locale_data.messages || this.defaults.locale_data.messages)[""],
pluralForms = dict[""].plural_forms || dict[""]["Plural-Forms"] || dict[""]["plural-forms"] || defaultConf.plural_forms || defaultConf["Plural-Forms"] || defaultConf["plural-forms"],
val_list,
res;
var val_idx;
if (val === undefined) {
// No value passed in; assume singular key lookup.
val_idx = 1;
} else {
// Value has been passed in; use plural-forms calculations.
// Handle invalid numbers, but try casting strings for good measure
if ( typeof val != 'number' ) {
val = parseInt( val, 10 );
if ( isNaN( val ) ) {
throw new Error('The number that was passed in is not a number.');
}
}
val_idx = getPluralFormFunc(pluralForms)(val) + 1;
}
// Throw an error if a domain isn't found
if ( ! dict ) {
throw new Error('No domain named `' + domain + '` could be found.');
}
val_list = dict[ key ];
// If there is no match, then revert back to
// english style singular/plural with the keys passed in.
if ( ! val_list || val_idx >= val_list.length ) {
if (this.options.missing_key_callback) {
this.options.missing_key_callback(key, domain);
}
res = [ null, singular_key, plural_key ];
// collect untranslated strings
if (this.options.debug===true) {
console.log(res[ getPluralFormFunc(pluralForms)( val ) + 1 ]);
}
return res[ getPluralFormFunc()( val ) + 1 ];
}
res = val_list[ val_idx ];
// This includes empty strings on purpose
if ( ! res ) {
res = [ null, singular_key, plural_key ];
return res[ getPluralFormFunc()( val ) + 1 ];
}
return res;
}
});
// We add in sprintf capabilities for post translation value interolation
// This is not internally used, so you can remove it if you have this
// available somewhere else, or want to use a different system.
// We _slightly_ modify the normal sprintf behavior to more gracefully handle
// undefined values.
/**
sprintf() for JavaScript 0.7-beta1
http://www.diveintojavascript.com/projects/javascript-sprintf
Copyright (c) Alexandru Marasteanu <alexaholic [at) gmail (dot] com>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of sprintf() for JavaScript nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL Alexandru Marasteanu BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
var sprintf = (function() {
function get_type(variable) {
return Object.prototype.toString.call(variable).slice(8, -1).toLowerCase();
}
function str_repeat(input, multiplier) {
for (var output = []; multiplier > 0; output[--multiplier] = input) {/* do nothing */}
return output.join('');
}
var str_format = function() {
if (!str_format.cache.hasOwnProperty(arguments[0])) {
str_format.cache[arguments[0]] = str_format.parse(arguments[0]);
}
return str_format.format.call(null, str_format.cache[arguments[0]], arguments);
};
str_format.format = function(parse_tree, argv) {
var cursor = 1, tree_length = parse_tree.length, node_type = '', arg, output = [], i, k, match, pad, pad_character, pad_length;
for (i = 0; i < tree_length; i++) {
node_type = get_type(parse_tree[i]);
if (node_type === 'string') {
output.push(parse_tree[i]);
}
else if (node_type === 'array') {
match = parse_tree[i]; // convenience purposes only
if (match[2]) { // keyword argument
arg = argv[cursor];
for (k = 0; k < match[2].length; k++) {
if (!arg.hasOwnProperty(match[2][k])) {
throw(sprintf('[sprintf] property "%s" does not exist', match[2][k]));
}
arg = arg[match[2][k]];
}
}
else if (match[1]) { // positional argument (explicit)
arg = argv[match[1]];
}
else { // positional argument (implicit)
arg = argv[cursor++];
}
if (/[^s]/.test(match[8]) && (get_type(arg) != 'number')) {
throw(sprintf('[sprintf] expecting number but found %s', get_type(arg)));
}
// Jed EDIT
if ( typeof arg == 'undefined' || arg === null ) {
arg = '';
}
// Jed EDIT
switch (match[8]) {
case 'b': arg = arg.toString(2); break;
case 'c': arg = String.fromCharCode(arg); break;
case 'd': arg = parseInt(arg, 10); break;
case 'e': arg = match[7] ? arg.toExponential(match[7]) : arg.toExponential(); break;
case 'f': arg = match[7] ? parseFloat(arg).toFixed(match[7]) : parseFloat(arg); break;
case 'o': arg = arg.toString(8); break;
case 's': arg = ((arg = String(arg)) && match[7] ? arg.substring(0, match[7]) : arg); break;
case 'u': arg = Math.abs(arg); break;
case 'x': arg = arg.toString(16); break;
case 'X': arg = arg.toString(16).toUpperCase(); break;
}
arg = (/[def]/.test(match[8]) && match[3] && arg >= 0 ? '+'+ arg : arg);
pad_character = match[4] ? match[4] == '0' ? '0' : match[4].charAt(1) : ' ';
pad_length = match[6] - String(arg).length;
pad = match[6] ? str_repeat(pad_character, pad_length) : '';
output.push(match[5] ? arg + pad : pad + arg);
}
}
return output.join('');
};
str_format.cache = {};
str_format.parse = function(fmt) {
var _fmt = fmt, match = [], parse_tree = [], arg_names = 0;
while (_fmt) {
if ((match = /^[^\x25]+/.exec(_fmt)) !== null) {
parse_tree.push(match[0]);
}
else if ((match = /^\x25{2}/.exec(_fmt)) !== null) {
parse_tree.push('%');
}
else if ((match = /^\x25(?:([1-9]\d*)\$|\(([^\)]+)\))?(\+)?(0|'[^$])?(-)?(\d+)?(?:\.(\d+))?([b-fosuxX])/.exec(_fmt)) !== null) {
if (match[2]) {
arg_names |= 1;
var field_list = [], replacement_field = match[2], field_match = [];
if ((field_match = /^([a-z_][a-z_\d]*)/i.exec(replacement_field)) !== null) {
field_list.push(field_match[1]);
while ((replacement_field = replacement_field.substring(field_match[0].length)) !== '') {
if ((field_match = /^\.([a-z_][a-z_\d]*)/i.exec(replacement_field)) !== null) {
field_list.push(field_match[1]);
}
else if ((field_match = /^\[(\d+)\]/.exec(replacement_field)) !== null) {
field_list.push(field_match[1]);
}
else {
throw('[sprintf] huh?');
}
}
}
else {
throw('[sprintf] huh?');
}
match[2] = field_list;
}
else {
arg_names |= 2;
}
if (arg_names === 3) {
throw('[sprintf] mixing positional and named placeholders is not (yet) supported');
}
parse_tree.push(match);
}
else {
throw('[sprintf] huh?');
}
_fmt = _fmt.substring(match[0].length);
}
return parse_tree;
};
return str_format;
})();
var vsprintf = function(fmt, argv) {
argv.unshift(fmt);
return sprintf.apply(null, argv);
};
Jed.parse_plural = function ( plural_forms, n ) {
plural_forms = plural_forms.replace(/n/g, n);
return Jed.parse_expression(plural_forms);
};
Jed.sprintf = function ( fmt, args ) {
if ( {}.toString.call( args ) == '[object Array]' ) {
return vsprintf( fmt, [].slice.call(args) );
}
return sprintf.apply(this, [].slice.call(arguments) );
};
Jed.prototype.sprintf = function () {
return Jed.sprintf.apply(this, arguments);
};
// END sprintf Implementation
// Start the Plural forms section
// This is a full plural form expression parser. It is used to avoid
// running 'eval' or 'new Function' directly against the plural
// forms.
//
// This can be important if you get translations done through a 3rd
// party vendor. I encourage you to use this instead, however, I
// also will provide a 'precompiler' that you can use at build time
// to output valid/safe function representations of the plural form
// expressions. This means you can build this code out for the most
// part.
Jed.PF = {};
Jed.PF.parse = function ( p ) {
var plural_str = Jed.PF.extractPluralExpr( p );
return Jed.PF.parser.parse.call(Jed.PF.parser, plural_str);
};
Jed.PF.compile = function ( p ) {
// Handle trues and falses as 0 and 1
function imply( val ) {
return (val === true ? 1 : val ? val : 0);
}
var ast = Jed.PF.parse( p );
return function ( n ) {
return imply( Jed.PF.interpreter( ast )( n ) );
};
};
Jed.PF.interpreter = function ( ast ) {
return function ( n ) {
var res;
switch ( ast.type ) {
case 'GROUP':
return Jed.PF.interpreter( ast.expr )( n );
case 'TERNARY':
if ( Jed.PF.interpreter( ast.expr )( n ) ) {
return Jed.PF.interpreter( ast.truthy )( n );
}
return Jed.PF.interpreter( ast.falsey )( n );
case 'OR':
return Jed.PF.interpreter( ast.left )( n ) || Jed.PF.interpreter( ast.right )( n );
case 'AND':
return Jed.PF.interpreter( ast.left )( n ) && Jed.PF.interpreter( ast.right )( n );
case 'LT':
return Jed.PF.interpreter( ast.left )( n ) < Jed.PF.interpreter( ast.right )( n );
case 'GT':
return Jed.PF.interpreter( ast.left )( n ) > Jed.PF.interpreter( ast.right )( n );
case 'LTE':
return Jed.PF.interpreter( ast.left )( n ) <= Jed.PF.interpreter( ast.right )( n );
case 'GTE':
return Jed.PF.interpreter( ast.left )( n ) >= Jed.PF.interpreter( ast.right )( n );
case 'EQ':
return Jed.PF.interpreter( ast.left )( n ) == Jed.PF.interpreter( ast.right )( n );
case 'NEQ':
return Jed.PF.interpreter( ast.left )( n ) != Jed.PF.interpreter( ast.right )( n );
case 'MOD':
return Jed.PF.interpreter( ast.left )( n ) % Jed.PF.interpreter( ast.right )( n );
case 'VAR':
return n;
case 'NUM':
return ast.val;
default:
throw new Error("Invalid Token found.");
}
};
};
Jed.PF.extractPluralExpr = function ( p ) {
// trim first
p = p.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
if (! /;\s*$/.test(p)) {
p = p.concat(';');
}
var nplurals_re = /nplurals\=(\d+);/,
plural_re = /plural\=(.*);/,
nplurals_matches = p.match( nplurals_re ),
res = {},
plural_matches;
// Find the nplurals number
if ( nplurals_matches.length > 1 ) {
res.nplurals = nplurals_matches[1];
}
else {
throw new Error('nplurals not found in plural_forms string: ' + p );
}
// remove that data to get to the formula
p = p.replace( nplurals_re, "" );
plural_matches = p.match( plural_re );
if (!( plural_matches && plural_matches.length > 1 ) ) {
throw new Error('`plural` expression not found: ' + p);
}
return plural_matches[ 1 ];
};
/* Jison generated parser */
Jed.PF.parser = (function(){
var parser = {trace: function trace() { },
yy: {},
symbols_: {"error":2,"expressions":3,"e":4,"EOF":5,"?":6,":":7,"||":8,"&&":9,"<":10,"<=":11,">":12,">=":13,"!=":14,"==":15,"%":16,"(":17,")":18,"n":19,"NUMBER":20,"$accept":0,"$end":1},
terminals_: {2:"error",5:"EOF",6:"?",7:":",8:"||",9:"&&",10:"<",11:"<=",12:">",13:">=",14:"!=",15:"==",16:"%",17:"(",18:")",19:"n",20:"NUMBER"},
productions_: [0,[3,2],[4,5],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,3],[4,1],[4,1]],
performAction: function anonymous(yytext,yyleng,yylineno,yy,yystate,$$,_$) {
var $0 = $$.length - 1;
switch (yystate) {
case 1: return { type : 'GROUP', expr: $$[$0-1] };
break;
case 2:this.$ = { type: 'TERNARY', expr: $$[$0-4], truthy : $$[$0-2], falsey: $$[$0] };
break;
case 3:this.$ = { type: "OR", left: $$[$0-2], right: $$[$0] };
break;
case 4:this.$ = { type: "AND", left: $$[$0-2], right: $$[$0] };
break;
case 5:this.$ = { type: 'LT', left: $$[$0-2], right: $$[$0] };
break;
case 6:this.$ = { type: 'LTE', left: $$[$0-2], right: $$[$0] };
break;
case 7:this.$ = { type: 'GT', left: $$[$0-2], right: $$[$0] };
break;
case 8:this.$ = { type: 'GTE', left: $$[$0-2], right: $$[$0] };
break;
case 9:this.$ = { type: 'NEQ', left: $$[$0-2], right: $$[$0] };
break;
case 10:this.$ = { type: 'EQ', left: $$[$0-2], right: $$[$0] };
break;
case 11:this.$ = { type: 'MOD', left: $$[$0-2], right: $$[$0] };
break;
case 12:this.$ = { type: 'GROUP', expr: $$[$0-1] };
break;
case 13:this.$ = { type: 'VAR' };
break;
case 14:this.$ = { type: 'NUM', val: Number(yytext) };
break;
}
},
table: [{3:1,4:2,17:[1,3],19:[1,4],20:[1,5]},{1:[3]},{5:[1,6],6:[1,7],8:[1,8],9:[1,9],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16]},{4:17,17:[1,3],19:[1,4],20:[1,5]},{5:[2,13],6:[2,13],7:[2,13],8:[2,13],9:[2,13],10:[2,13],11:[2,13],12:[2,13],13:[2,13],14:[2,13],15:[2,13],16:[2,13],18:[2,13]},{5:[2,14],6:[2,14],7:[2,14],8:[2,14],9:[2,14],10:[2,14],11:[2,14],12:[2,14],13:[2,14],14:[2,14],15:[2,14],16:[2,14],18:[2,14]},{1:[2,1]},{4:18,17:[1,3],19:[1,4],20:[1,5]},{4:19,17:[1,3],19:[1,4],20:[1,5]},{4:20,17:[1,3],19:[1,4],20:[1,5]},{4:21,17:[1,3],19:[1,4],20:[1,5]},{4:22,17:[1,3],19:[1,4],20:[1,5]},{4:23,17:[1,3],19:[1,4],20:[1,5]},{4:24,17:[1,3],19:[1,4],20:[1,5]},{4:25,17:[1,3],19:[1,4],20:[1,5]},{4:26,17:[1,3],19:[1,4],20:[1,5]},{4:27,17:[1,3],19:[1,4],20:[1,5]},{6:[1,7],8:[1,8],9:[1,9],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16],18:[1,28]},{6:[1,7],7:[1,29],8:[1,8],9:[1,9],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16]},{5:[2,3],6:[2,3],7:[2,3],8:[2,3],9:[1,9],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16],18:[2,3]},{5:[2,4],6:[2,4],7:[2,4],8:[2,4],9:[2,4],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16],18:[2,4]},{5:[2,5],6:[2,5],7:[2,5],8:[2,5],9:[2,5],10:[2,5],11:[2,5],12:[2,5],13:[2,5],14:[2,5],15:[2,5],16:[1,16],18:[2,5]},{5:[2,6],6:[2,6],7:[2,6],8:[2,6],9:[2,6],10:[2,6],11:[2,6],12:[2,6],13:[2,6],14:[2,6],15:[2,6],16:[1,16],18:[2,6]},{5:[2,7],6:[2,7],7:[2,7],8:[2,7],9:[2,7],10:[2,7],11:[2,7],12:[2,7],13:[2,7],14:[2,7],15:[2,7],16:[1,16],18:[2,7]},{5:[2,8],6:[2,8],7:[2,8],8:[2,8],9:[2,8],10:[2,8],11:[2,8],12:[2,8],13:[2,8],14:[2,8],15:[2,8],16:[1,16],18:[2,8]},{5:[2,9],6:[2,9],7:[2,9],8:[2,9],9:[2,9],10:[2,9],11:[2,9],12:[2,9],13:[2,9],14:[2,9],15:[2,9],16:[1,16],18:[2,9]},{5:[2,10],6:[2,10],7:[2,10],8:[2,10],9:[2,10],10:[2,10],11:[2,10],12:[2,10],13:[2,10],14:[2,10],15:[2,10],16:[1,16],18:[2,10]},{5:[2,11],6:[2,11],7:[2,11],8:[2,11],9:[2,11],10:[2,11],11:[2,11],12:[2,11],13:[2,11],14:[2,11],15:[2,11],16:[2,11],18:[2,11]},{5:[2,12],6:[2,12],7:[2,12],8:[2,12],9:[2,12],10:[2,12],11:[2,12],12:[2,12],13:[2,12],14:[2,12],15:[2,12],16:[2,12],18:[2,12]},{4:30,17:[1,3],19:[1,4],20:[1,5]},{5:[2,2],6:[1,7],7:[2,2],8:[1,8],9:[1,9],10:[1,10],11:[1,11],12:[1,12],13:[1,13],14:[1,14],15:[1,15],16:[1,16],18:[2,2]}],
defaultActions: {6:[2,1]},
parseError: function parseError(str, hash) {
throw new Error(str);
},
parse: function parse(input) {
var self = this,
stack = [0],
vstack = [null], // semantic value stack
lstack = [], // location stack
table = this.table,
yytext = '',
yylineno = 0,
yyleng = 0,
recovering = 0,
TERROR = 2,
EOF = 1;
//this.reductionCount = this.shiftCount = 0;
this.lexer.setInput(input);
this.lexer.yy = this.yy;
this.yy.lexer = this.lexer;
if (typeof this.lexer.yylloc == 'undefined')
this.lexer.yylloc = {};
var yyloc = this.lexer.yylloc;
lstack.push(yyloc);
if (typeof this.yy.parseError === 'function')
this.parseError = this.yy.parseError;
function popStack (n) {
stack.length = stack.length - 2*n;
vstack.length = vstack.length - n;
lstack.length = lstack.length - n;
}
function lex() {
var token;
token = self.lexer.lex() || 1; // $end = 1
// if token isn't its numeric value, convert
if (typeof token !== 'number') {
token = self.symbols_[token] || token;
}
return token;
}
var symbol, preErrorSymbol, state, action, a, r, yyval={},p,len,newState, expected;
while (true) {
// retreive state number from top of stack
state = stack[stack.length-1];
// use default actions if available
if (this.defaultActions[state]) {
action = this.defaultActions[state];
} else {
if (symbol == null)
symbol = lex();
// read action for current state and first input
action = table[state] && table[state][symbol];
}
// handle parse error
_handle_error:
if (typeof action === 'undefined' || !action.length || !action[0]) {
if (!recovering) {
// Report error
expected = [];
for (p in table[state]) if (this.terminals_[p] && p > 2) {
expected.push("'"+this.terminals_[p]+"'");
}
var errStr = '';
if (this.lexer.showPosition) {
errStr = 'Parse error on line '+(yylineno+1)+":\n"+this.lexer.showPosition()+"\nExpecting "+expected.join(', ') + ", got '" + this.terminals_[symbol]+ "'";
} else {
errStr = 'Parse error on line '+(yylineno+1)+": Unexpected " +
(symbol == 1 /*EOF*/ ? "end of input" :
("'"+(this.terminals_[symbol] || symbol)+"'"));
}
this.parseError(errStr,
{text: this.lexer.match, token: this.terminals_[symbol] || symbol, line: this.lexer.yylineno, loc: yyloc, expected: expected});
}
// just recovered from another error
if (recovering == 3) {
if (symbol == EOF) {
throw new Error(errStr || 'Parsing halted.');
}
// discard current lookahead and grab another
yyleng = this.lexer.yyleng;
yytext = this.lexer.yytext;
yylineno = this.lexer.yylineno;
yyloc = this.lexer.yylloc;
symbol = lex();
}
// try to recover from error
while (1) {
// check for error recovery rule in this state
if ((TERROR.toString()) in table[state]) {
break;
}
if (state == 0) {
throw new Error(errStr || 'Parsing halted.');
}
popStack(1);
state = stack[stack.length-1];
}
preErrorSymbol = symbol; // save the lookahead token
symbol = TERROR; // insert generic error symbol as new lookahead
state = stack[stack.length-1];
action = table[state] && table[state][TERROR];
recovering = 3; // allow 3 real symbols to be shifted before reporting a new error
}
// this shouldn't happen, unless resolve defaults are off
if (action[0] instanceof Array && action.length > 1) {
throw new Error('Parse Error: multiple actions possible at state: '+state+', token: '+symbol);
}
switch (action[0]) {
case 1: // shift
//this.shiftCount++;
stack.push(symbol);
vstack.push(this.lexer.yytext);
lstack.push(this.lexer.yylloc);
stack.push(action[1]); // push state
symbol = null;
if (!preErrorSymbol) { // normal execution/no error
yyleng = this.lexer.yyleng;
yytext = this.lexer.yytext;
yylineno = this.lexer.yylineno;
yyloc = this.lexer.yylloc;
if (recovering > 0)
recovering--;
} else { // error just occurred, resume old lookahead from before the error
symbol = preErrorSymbol;
preErrorSymbol = null;
}
break;
case 2: // reduce
//this.reductionCount++;
len = this.productions_[action[1]][1];
// perform semantic action
yyval.$ = vstack[vstack.length-len]; // default to $$ = $1
// default location, uses first token for firsts, last for lasts
yyval._$ = {
first_line: lstack[lstack.length-(len||1)].first_line,
last_line: lstack[lstack.length-1].last_line,
first_column: lstack[lstack.length-(len||1)].first_column,
last_column: lstack[lstack.length-1].last_column
};
r = this.performAction.call(yyval, yytext, yyleng, yylineno, this.yy, action[1], vstack, lstack);
if (typeof r !== 'undefined') {
return r;
}
// pop off stack
if (len) {
stack = stack.slice(0,-1*len*2);
vstack = vstack.slice(0, -1*len);
lstack = lstack.slice(0, -1*len);
}
stack.push(this.productions_[action[1]][0]); // push nonterminal (reduce)
vstack.push(yyval.$);
lstack.push(yyval._$);
// goto new state = table[STATE][NONTERMINAL]
newState = table[stack[stack.length-2]][stack[stack.length-1]];
stack.push(newState);
break;
case 3: // accept
return true;
}
}
return true;
}};/* Jison generated lexer */
var lexer = (function(){
var lexer = ({EOF:1,
parseError:function parseError(str, hash) {
if (this.yy.parseError) {
this.yy.parseError(str, hash);
} else {
throw new Error(str);
}
},
setInput:function (input) {
this._input = input;
this._more = this._less = this.done = false;
this.yylineno = this.yyleng = 0;
this.yytext = this.matched = this.match = '';
this.conditionStack = ['INITIAL'];
this.yylloc = {first_line:1,first_column:0,last_line:1,last_column:0};
return this;
},
input:function () {
var ch = this._input[0];
this.yytext+=ch;
this.yyleng++;
this.match+=ch;
this.matched+=ch;
var lines = ch.match(/\n/);
if (lines) this.yylineno++;
this._input = this._input.slice(1);
return ch;
},
unput:function (ch) {
this._input = ch + this._input;
return this;
},
more:function () {
this._more = true;
return this;
},
pastInput:function () {
var past = this.matched.substr(0, this.matched.length - this.match.length);
return (past.length > 20 ? '...':'') + past.substr(-20).replace(/\n/g, "");
},
upcomingInput:function () {
var next = this.match;
if (next.length < 20) {
next += this._input.substr(0, 20-next.length);
}
return (next.substr(0,20)+(next.length > 20 ? '...':'')).replace(/\n/g, "");
},
showPosition:function () {
var pre = this.pastInput();
var c = new Array(pre.length + 1).join("-");
return pre + this.upcomingInput() + "\n" + c+"^";
},
next:function () {
if (this.done) {
return this.EOF;
}
if (!this._input) this.done = true;
var token,
match,
col,
lines;
if (!this._more) {
this.yytext = '';
this.match = '';
}
var rules = this._currentRules();
for (var i=0;i < rules.length; i++) {
match = this._input.match(this.rules[rules[i]]);
if (match) {
lines = match[0].match(/\n.*/g);
if (lines) this.yylineno += lines.length;
this.yylloc = {first_line: this.yylloc.last_line,
last_line: this.yylineno+1,
first_column: this.yylloc.last_column,
last_column: lines ? lines[lines.length-1].length-1 : this.yylloc.last_column + match[0].length}
this.yytext += match[0];
this.match += match[0];
this.matches = match;
this.yyleng = this.yytext.length;
this._more = false;
this._input = this._input.slice(match[0].length);
this.matched += match[0];
token = this.performAction.call(this, this.yy, this, rules[i],this.conditionStack[this.conditionStack.length-1]);
if (token) return token;
else return;
}
}
if (this._input === "") {
return this.EOF;
} else {
this.parseError('Lexical error on line '+(this.yylineno+1)+'. Unrecognized text.\n'+this.showPosition(),
{text: "", token: null, line: this.yylineno});
}
},
lex:function lex() {
var r = this.next();
if (typeof r !== 'undefined') {
return r;
} else {
return this.lex();
}
},
begin:function begin(condition) {
this.conditionStack.push(condition);
},
popState:function popState() {
return this.conditionStack.pop();
},
_currentRules:function _currentRules() {
return this.conditions[this.conditionStack[this.conditionStack.length-1]].rules;
},
topState:function () {
return this.conditionStack[this.conditionStack.length-2];
},
pushState:function begin(condition) {
this.begin(condition);
}});
lexer.performAction = function anonymous(yy,yy_,$avoiding_name_collisions,YY_START) {
var YYSTATE=YY_START;
switch($avoiding_name_collisions) {
case 0:/* skip whitespace */
break;
case 1:return 20
break;
case 2:return 19
break;
case 3:return 8
break;
case 4:return 9
break;
case 5:return 6
break;
case 6:return 7
break;
case 7:return 11
break;
case 8:return 13
break;
case 9:return 10
break;
case 10:return 12
break;
case 11:return 14
break;
case 12:return 15
break;
case 13:return 16
break;
case 14:return 17
break;
case 15:return 18
break;
case 16:return 5
break;
case 17:return 'INVALID'
break;
}
};
lexer.rules = [/^\s+/,/^[0-9]+(\.[0-9]+)?\b/,/^n\b/,/^\|\|/,/^&&/,/^\?/,/^:/,/^<=/,/^>=/,/^</,/^>/,/^!=/,/^==/,/^%/,/^\(/,/^\)/,/^$/,/^./];
lexer.conditions = {"INITIAL":{"rules":[0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17],"inclusive":true}};return lexer;})()
parser.lexer = lexer;
return parser;
})();
// End parser
// Handle node, amd, and global systems
if (typeof exports !== 'undefined') {
if (typeof module !== 'undefined' && module.exports) {
exports = module.exports = Jed;
}
exports.Jed = Jed;
}
else {
if (typeof define === 'function' && define.amd) {
define('jed', function() {
return Jed;
});
}
// Leak a global regardless of module system
root['Jed'] = Jed;
}
})(this);
/Flask_MAB-3.0.0-py3-none-any.whl/flask_mab/bandits.py

from random import random, choice, uniform, betavariate
from math import log, exp, expm1
class Bandit(object):
"""The primary bandit interface. Don't use this unless you really
want uniform random arm selection (which defeats the whole purpose, really)
Used as a control to test against and as an interface to define methods against.
"""
@classmethod
def fromdict(cls, dict_spec):
extra_args = dict(
[
(key, value)
for key, value in dict_spec.items()
if key
not in [
"arms",
"pulls",
"reward",
"values",
"bandit_type",
"confidence",
]
]
)
bandit = globals()[dict_spec["bandit_type"]](**extra_args)
bandit.arms = dict_spec["arms"]
bandit.pulls = dict_spec["pulls"]
bandit.reward = dict_spec["reward"]
bandit.values = dict_spec["values"]
bandit.confidence = dict_spec.get("confidence", [0.0] * len(bandit.arms))
return bandit
def __init__(self):
self.arms = []
self.pulls = []
self.reward = []
self.values = []
self.confidence = []
def add_arm(self, arm_id, value=None):
self.arms.append(arm_id)
self.pulls.append(0)
self.reward.append(0.0)
self.confidence.append(0.0)
self.values.append(value)
def pull_arm(self, arm_id):
ind = self.arms.index(arm_id)
if ind > -1:
self.pulls[ind] += 1
def reward_arm(self, arm_id, reward):
ind = self.arms.index(arm_id)
if ind > -1:
self.reward[ind] += reward
self._update(ind, reward)
def _update(self, arm_index, reward):
n = max(1, self.pulls[arm_index])
current = self.confidence[arm_index]
self.confidence[arm_index] = ((n - 1) / float(n)) * current + (
1 / float(n)
) * reward
def suggest_arm(self):
"""Uniform random for default bandit.
Just uses random.choice to choose between arms
"""
return self[choice(self.arms)]
def __getitem__(self, key):
ind = self.arms.index(key)
if ind > -1:
arm = {
"id": self.arms[ind],
"pulls": self.pulls[ind],
"reward": self.reward[ind],
"value": self.values[ind],
}
return arm
else:
raise KeyError("Arm is not found in this bandit")
def __str__(self):
output = "%s " % self.__class__.__name__
output += "; ".join(
["%s:%s" % (key, val) for key, val in self.__dict__.items()]
)
return output
class EpsilonGreedyBandit(Bandit):
"""Epsilon Greedy Bandit implementation. Aggressively favors the present winner.
Will assign winning arm 1.0 - epsilon of the time, uniform random between arms
epsilon % of the time.
Will "exploit" the present winner more often with lower values of epsilon, "explore"
other candidates more often with higher values of epsilon.
:param epsilon: The percentage of the time to "explore" other arms, E.G a value of
0.1 will perform random assignment for %10 of traffic
:type epsilon: float
"""
def __init__(self, epsilon=0.1):
super(EpsilonGreedyBandit, self).__init__()
self.epsilon = epsilon
def suggest_arm(self):
"""Get an arm according to the EpsilonGreedy Strategy"""
random_determination = random()
if random_determination > self.epsilon:
key = self._ind_max()
else:
key = choice(self.arms)
return self[key]
def _ind_max(self):
return self.arms[self.confidence.index(max(self.confidence))]
def __str__(self):
return Bandit.__str__(self)
def __repr__(self):
return Bandit.__str__(self)
def all_same(items):
return all(x == items[0] for x in items)
class NaiveStochasticBandit(Bandit):
"""A naive weighted random Bandit. Favors the winner by giving it greater weight
in random selection.
Winner will eventually flatten out the losers if margin is great enough
"""
def __init__(self):
super(NaiveStochasticBandit, self).__init__()
def _compute_weights(self):
weights = []
for ind, n in enumerate(self.pulls):
reward = self.reward[ind]
try:
weights.append(1.0 * (float(reward) / float(n)))
except ZeroDivisionError:
weights.append(1.0 / len(self.arms))
return weights
def suggest_arm(self):
"""Get an arm according to the Naive Stochastic Strategy"""
weights = self._compute_weights()
random_determination = uniform(0.0, 1.0)
cum_weight = 0.0
for ind, weight in enumerate(weights):
cum_weight += weight
if cum_weight > random_determination:
return self[self.arms[ind]]
return self[self.arms[0]]
class SoftmaxBandit(NaiveStochasticBandit):
def __init__(self, tau=0.1):
super(SoftmaxBandit, self).__init__()
self.tau = tau
def _compute_weights(self):
weights = []
total_reward = sum([exp(x / self.tau) for x in self.confidence])
for ind, n in enumerate(self.pulls):
weights.append(exp(self.confidence[ind] / self.tau) / total_reward)
return weights
class AnnealingSoftmaxBandit(SoftmaxBandit):
def __init__(self):
super(AnnealingSoftmaxBandit, self).__init__()
self.tau = 1
def _compute_weights(self):
t = sum(self.pulls) + 1
self.tau = 1 / log(t + 0.0000001)
weights = []
total_reward = sum([exp(x / self.tau) for x in self.confidence])
for ind, n in enumerate(self.pulls):
weights.append(exp(self.confidence[ind] / self.tau) / total_reward)
return weights
class ThompsonBandit(NaiveStochasticBandit):
def __init__(self, prior=(1.0, 1.0)):
super(ThompsonBandit, self).__init__()
self.prior = prior
def _compute_weights(self):
sampled_theta = []
for ind, n in enumerate(self.arms):
dist = betavariate(
self.prior[0] + self.reward[ind],
self.prior[1] + self.pulls[ind] - self.reward[ind],
)
sampled_theta += [dist]
return sampled_theta
def suggest_arm(self):
weights = self._compute_weights()
return self[self.arms[weights.index(max(weights))]]
def reward_arm(self, arm_id, reward):
if reward != 1.0:
reward = 1.0
super(ThompsonBandit, self).reward_arm(arm_id, reward)
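# Minimal usage sketch (illustrative only, not part of the library API): wires
# an EpsilonGreedyBandit to two hypothetical arms and simulates rewards.
if __name__ == "__main__":
    demo_bandit = EpsilonGreedyBandit(epsilon=0.1)
    demo_bandit.add_arm("arm_a")
    demo_bandit.add_arm("arm_b")
    for _ in range(100):
        arm = demo_bandit.suggest_arm()
        demo_bandit.pull_arm(arm["id"])
        # pretend "arm_b" converts 60% of the time and "arm_a" never does
        demo_bandit.reward_arm(arm["id"], 1.0 if arm["id"] == "arm_b" and random() < 0.6 else 0.0)
    print(demo_bandit)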
/Altair%20Smartworks%20SDK-0.0.1.tar.gz/Altair Smartworks SDK-0.0.1/openapi_client/model/action_delete_response.py

import re # noqa: F401
import sys # noqa: F401
from openapi_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from openapi_client.model.thing_delete_response_error_cluster_backend import ThingDeleteResponseErrorClusterBackend
globals()['ThingDeleteResponseErrorClusterBackend'] = ThingDeleteResponseErrorClusterBackend
class ActionDeleteResponse(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'error_cluster_backend': (ThingDeleteResponseErrorClusterBackend,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'error_cluster_backend': 'error_cluster_backend', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""ActionDeleteResponse - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
error_cluster_backend (ThingDeleteResponseErrorClusterBackend): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
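# Usage sketch (illustrative; constructing the nested payload by hand like this
# is an assumption -- in practice the generated ApiClient deserializes it):
#   from openapi_client.model.thing_delete_response_error_cluster_backend import (
#       ThingDeleteResponseErrorClusterBackend)
#   resp = ActionDeleteResponse(
#       error_cluster_backend=ThingDeleteResponseErrorClusterBackend())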
/Dandelion-0.17.26-py3-none-any.whl/dandelion/model/resnet.py

# ------------------------------------------------------------------------------------------------
__author__ = 'dawei.leng'
import theano.tensor as tensor
from ..module import *
from ..functional import *
from ..activation import *
class ResNet_bottleneck(Module):
"""
[ResNet bottleneck block] (https://arxiv.org/abs/1512.03385).
"""
def __init__(self, outer_channel=256, inner_channel=64, border_mode='same', batchnorm_mode=1, activation=relu):
"""
:param outer_channel: channel number of block input
:param inner_channel: channel number inside the block
:param border_mode:
:param batchnorm_mode: {0 | 1 | 2}. 0 means no batch normalization applied; 1 means batch normalization applied to each cnn;
2 means batch normalization only applied to the last cnn
:param activation: default = relu. Note no activation applied to the last element-wise sum output.
"""
super().__init__()
self.activation = activation
self.batchnorm_mode = batchnorm_mode
self.conv1 = Conv2D(in_channels=outer_channel, out_channels=inner_channel, kernel_size=1, pad=border_mode)
self.conv2 = Conv2D(in_channels=inner_channel, out_channels=inner_channel, kernel_size=3, pad=border_mode)
self.conv3 = Conv2D(in_channels=inner_channel, out_channels=outer_channel, kernel_size=1, pad=border_mode)
if batchnorm_mode == 0: # no batch normalization
pass
elif batchnorm_mode == 1: # batch normalization per convolution
self.bn1 = BatchNorm(input_shape=(None, inner_channel, None, None))
self.bn2 = BatchNorm(input_shape=(None, inner_channel, None, None))
self.bn3 = BatchNorm(input_shape=(None, outer_channel, None, None))
elif batchnorm_mode == 2: # only one batch normalization at the end
self.bn3 = BatchNorm(input_shape=(None, outer_channel, None, None))
else:
raise ValueError('batchnorm_mode should be {0 | 1 | 2}')
def forward(self, x):
"""
:param x: (B, C, H, W)
:return:
"""
self.work_mode = 'train'
x0 = x
x = self.conv1.forward(x)
if self.batchnorm_mode == 1:
x = self.bn1.forward(x)
x = self.activation(x)
x = self.conv2.forward(x)
if self.batchnorm_mode == 1:
x = self.bn2.forward(x)
x = self.activation(x)
x = self.conv3.forward(x)
if self.batchnorm_mode in {1, 2}:
x = self.bn3.forward(x)
x = self.activation(x)
x = x + x0
return x
def predict(self, x):
self.work_mode = 'inference'
x0 = x
x = self.conv1.predict(x)
if self.batchnorm_mode == 1:
x = self.bn1.predict(x)
x = self.activation(x)
x = self.conv2.predict(x)
if self.batchnorm_mode == 1:
x = self.bn2.predict(x)
x = self.activation(x)
x = self.conv3.predict(x)
if self.batchnorm_mode in {1, 2}:
x = self.bn3.predict(x)
x = self.activation(x)
x = x + x0
return x
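# Usage sketch (illustrative; assumes theano is available and the input is a 4D
# (batch, channel, height, width) tensor):
#   block = ResNet_bottleneck(outer_channel=256, inner_channel=64)
#   x = tensor.tensor4('x')
#   y_train = block.forward(x)    # training-mode graph
#   y_infer = block.predict(x)    # inference-mode graph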
/DPark-0.5.0.tar.gz/DPark-0.5.0/dpark/web/ui/static/js/bootstrap-tooltip.js

!function ($) {
"use strict"; // jshint ;_;
/* TOOLTIP PUBLIC CLASS DEFINITION
* =============================== */
var Tooltip = function (element, options) {
this.init('tooltip', element, options)
}
Tooltip.prototype = {
constructor: Tooltip
, init: function (type, element, options) {
var eventIn
, eventOut
, triggers
, trigger
, i
this.type = type
this.$element = $(element)
this.options = this.getOptions(options)
this.enabled = true
triggers = this.options.trigger.split(' ')
for (i = triggers.length; i--;) {
trigger = triggers[i]
if (trigger == 'click') {
this.$element.on('click.' + this.type, this.options.selector, $.proxy(this.toggle, this))
} else if (trigger != 'manual') {
eventIn = trigger == 'hover' ? 'mouseenter' : 'focus'
eventOut = trigger == 'hover' ? 'mouseleave' : 'blur'
this.$element.on(eventIn + '.' + this.type, this.options.selector, $.proxy(this.enter, this))
this.$element.on(eventOut + '.' + this.type, this.options.selector, $.proxy(this.leave, this))
}
}
this.options.selector ?
(this._options = $.extend({}, this.options, { trigger: 'manual', selector: '' })) :
this.fixTitle()
}
, getOptions: function (options) {
options = $.extend({}, $.fn[this.type].defaults, this.$element.data(), options)
if (options.delay && typeof options.delay == 'number') {
options.delay = {
show: options.delay
, hide: options.delay
}
}
return options
}
, enter: function (e) {
var defaults = $.fn[this.type].defaults
, options = {}
, self
this._options && $.each(this._options, function (key, value) {
if (defaults[key] != value) options[key] = value
}, this)
self = $(e.currentTarget)[this.type](options).data(this.type)
if (!self.options.delay || !self.options.delay.show) return self.show()
clearTimeout(this.timeout)
self.hoverState = 'in'
this.timeout = setTimeout(function() {
if (self.hoverState == 'in') self.show()
}, self.options.delay.show)
}
, leave: function (e) {
var self = $(e.currentTarget)[this.type](this._options).data(this.type)
if (this.timeout) clearTimeout(this.timeout)
if (!self.options.delay || !self.options.delay.hide) return self.hide()
self.hoverState = 'out'
this.timeout = setTimeout(function() {
if (self.hoverState == 'out') self.hide()
}, self.options.delay.hide)
}
, show: function () {
var $tip
, pos
, actualWidth
, actualHeight
, placement
, tp
, e = $.Event('show')
if (this.hasContent() && this.enabled) {
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$tip = this.tip()
this.setContent()
if (this.options.animation) {
$tip.addClass('fade')
}
placement = typeof this.options.placement == 'function' ?
this.options.placement.call(this, $tip[0], this.$element[0]) :
this.options.placement
$tip
.detach()
.css({ top: 0, left: 0, display: 'block' })
this.options.container ? $tip.appendTo(this.options.container) : $tip.insertAfter(this.$element)
pos = this.getPosition()
actualWidth = $tip[0].offsetWidth
actualHeight = $tip[0].offsetHeight
switch (placement) {
case 'bottom':
tp = {top: pos.top + pos.height, left: pos.left + pos.width / 2 - actualWidth / 2}
break
case 'top':
tp = {top: pos.top - actualHeight, left: pos.left + pos.width / 2 - actualWidth / 2}
break
case 'left':
tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left - actualWidth}
break
case 'right':
tp = {top: pos.top + pos.height / 2 - actualHeight / 2, left: pos.left + pos.width}
break
}
this.applyPlacement(tp, placement)
this.$element.trigger('shown')
}
}
, applyPlacement: function(offset, placement){
var $tip = this.tip()
, width = $tip[0].offsetWidth
, height = $tip[0].offsetHeight
, actualWidth
, actualHeight
, delta
, replace
$tip
.offset(offset)
.addClass(placement)
.addClass('in')
actualWidth = $tip[0].offsetWidth
actualHeight = $tip[0].offsetHeight
if (placement == 'top' && actualHeight != height) {
offset.top = offset.top + height - actualHeight
replace = true
}
if (placement == 'bottom' || placement == 'top') {
delta = 0
if (offset.left < 0){
delta = offset.left * -2
offset.left = 0
$tip.offset(offset)
actualWidth = $tip[0].offsetWidth
actualHeight = $tip[0].offsetHeight
}
this.replaceArrow(delta - width + actualWidth, actualWidth, 'left')
} else {
this.replaceArrow(actualHeight - height, actualHeight, 'top')
}
if (replace) $tip.offset(offset)
}
, replaceArrow: function(delta, dimension, position){
this
.arrow()
.css(position, delta ? (50 * (1 - delta / dimension) + "%") : '')
}
, setContent: function () {
var $tip = this.tip()
, title = this.getTitle()
$tip.find('.tooltip-inner')[this.options.html ? 'html' : 'text'](title)
$tip.removeClass('fade in top bottom left right')
}
, hide: function () {
var that = this
, $tip = this.tip()
, e = $.Event('hide')
this.$element.trigger(e)
if (e.isDefaultPrevented()) return
$tip.removeClass('in')
function removeWithAnimation() {
var timeout = setTimeout(function () {
$tip.off($.support.transition.end).detach()
}, 500)
$tip.one($.support.transition.end, function () {
clearTimeout(timeout)
$tip.detach()
})
}
$.support.transition && this.$tip.hasClass('fade') ?
removeWithAnimation() :
$tip.detach()
this.$element.trigger('hidden')
return this
}
, fixTitle: function () {
var $e = this.$element
if ($e.attr('title') || typeof($e.attr('data-original-title')) != 'string') {
$e.attr('data-original-title', $e.attr('title') || '').attr('title', '')
}
}
, hasContent: function () {
return this.getTitle()
}
, getPosition: function () {
var el = this.$element[0]
return $.extend({}, (typeof el.getBoundingClientRect == 'function') ? el.getBoundingClientRect() : {
width: el.offsetWidth
, height: el.offsetHeight
}, this.$element.offset())
}
, getTitle: function () {
var title
, $e = this.$element
, o = this.options
title = $e.attr('data-original-title')
|| (typeof o.title == 'function' ? o.title.call($e[0]) : o.title)
return title
}
, tip: function () {
return this.$tip = this.$tip || $(this.options.template)
}
, arrow: function(){
return this.$arrow = this.$arrow || this.tip().find(".tooltip-arrow")
}
, validate: function () {
if (!this.$element[0].parentNode) {
this.hide()
this.$element = null
this.options = null
}
}
, enable: function () {
this.enabled = true
}
, disable: function () {
this.enabled = false
}
, toggleEnabled: function () {
this.enabled = !this.enabled
}
, toggle: function (e) {
var self = e ? $(e.currentTarget)[this.type](this._options).data(this.type) : this
self.tip().hasClass('in') ? self.hide() : self.show()
}
, destroy: function () {
this.hide().$element.off('.' + this.type).removeData(this.type)
}
}
/* TOOLTIP PLUGIN DEFINITION
* ========================= */
var old = $.fn.tooltip
$.fn.tooltip = function ( option ) {
return this.each(function () {
var $this = $(this)
, data = $this.data('tooltip')
, options = typeof option == 'object' && option
if (!data) $this.data('tooltip', (data = new Tooltip(this, options)))
if (typeof option == 'string') data[option]()
})
}
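/* Usage sketch (illustrative):
 *   $('#example').tooltip({ placement: 'right', title: 'Hello' })
 *   $('#example').tooltip('show')
 */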
$.fn.tooltip.Constructor = Tooltip
$.fn.tooltip.defaults = {
animation: true
, placement: 'top'
, selector: false
, template: '<div class="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>'
, trigger: 'hover focus'
, title: ''
, delay: 0
, html: false
, container: false
}
/* TOOLTIP NO CONFLICT
* =================== */
$.fn.tooltip.noConflict = function () {
$.fn.tooltip = old
return this
}
}(window.jQuery);
/Flask-Scaffold-0.5.1.tar.gz/Flask-Scaffold-0.5.1/app/templates/static/node_modules/angular-grid/src/ts/widgets/agDropdownList.ts
module awk.grid {
var utils = Utils;
var svgFactory = SvgFactory.getInstance();
export class AgDropdownList {
private itemSelectedListeners: any;
private eValue: any;
private agList: any;
private eGui: any;
private hidePopupCallback: any;
private selectedItem: any;
private cellRenderer: any;
private popupService: PopupService;
constructor(popupService: PopupService) {
this.popupService = popupService;
this.setupComponents();
this.itemSelectedListeners = [];
}
setWidth(width: any) {
this.eValue.style.width = width + 'px';
this.agList.addStyles({width: width + 'px'});
}
addItemSelectedListener(listener: any) {
this.itemSelectedListeners.push(listener);
}
fireItemSelected(item: any) {
for (var i = 0; i < this.itemSelectedListeners.length; i++) {
this.itemSelectedListeners[i](item);
}
}
setupComponents() {
this.eGui = document.createElement('span');
this.eValue = document.createElement('span');
this.eGui.appendChild(this.eValue);
this.agList = new AgList();
this.eValue.addEventListener('click', this.onClick.bind(this));
this.agList.addItemSelectedListener(this.itemSelected.bind(this));
this.agList.addCssClass('ag-popup-list');
utils.addStylesToElement(this.eValue, {
border: '1px solid darkgrey',
display: 'inline-block',
paddingLeft: 2
});
utils.addStylesToElement(this.eGui, {position: 'relative'});
this.agList.addStyles({
display: 'inline-block',
position: 'absolute',
top: 0,
left: 0,
backgroundColor: 'white'
});
}
itemSelected(item: any) {
this.setSelected(item);
if (this.hidePopupCallback) {
this.hidePopupCallback();
}
this.fireItemSelected(item);
}
onClick() {
var agListGui = this.agList.getGui();
this.popupService.positionPopup(this.eGui, agListGui, -1);
this.hidePopupCallback = this.popupService.addAsModalPopup(agListGui, true);
}
getGui() {
return this.eGui;
}
setSelected(item: any) {
this.selectedItem = item;
this.refreshView();
}
setCellRenderer(cellRenderer: any) {
this.agList.setCellRenderer(cellRenderer);
this.cellRenderer = cellRenderer;
}
refreshView() {
utils.removeAllChildren(this.eValue);
if (this.selectedItem) {
if (this.cellRenderer) {
var params = {value: this.selectedItem};
utils.useRenderer(this.eValue, this.cellRenderer, params);
} else {
this.eValue.appendChild(document.createTextNode(this.selectedItem));
}
}
var eDownIcon: any = svgFactory.createSmallArrowDownSvg();
eDownIcon.style.float = 'right';
eDownIcon.style.marginTop = '6px';
eDownIcon.style.marginRight = '2px';
this.eValue.appendChild(eDownIcon);
}
setModel(model: any) {
this.agList.setModel(model);
}
}
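// Usage sketch (illustrative; popupService and container are assumed to exist
// in the surrounding grid code):
//   var dropdown = new AgDropdownList(popupService);
//   dropdown.setModel(['Ascending', 'Descending']);
//   dropdown.addItemSelectedListener(function (item: any) { console.log(item); });
//   container.appendChild(dropdown.getGui());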
}
/12306-booking-0.1.18.tar.gz/12306-booking-0.1.18/booking/auth.py

import os
import time
import uuid
import json
import base64
import logging
from hack12306.auth import TrainAuthAPI
from hack12306.user import TrainUserAPI
from hack12306.exceptions import TrainUserNotLogin
from . import settings
from .remind import remind_login_qr
_logger = logging.getLogger('booking')
__all__ = ('auth_qr', 'auth_reauth', 'auth_is_login')
LOGIN_QR_HTML_TPL = """
<html>
<head>
<title>12306-booking QR code login</title>
<meta charset="utf8">
</head>
<body>
<h1>12306 booking assistant</h1>
<img src="{filepath}" alt="12306 login QR code"><br>
<b>Please scan the QR code with the 12306 APP to log in</b>
</body>
<script>
setTimeout("window.close()", 30000)
</script>
</html>
"""
def _uamtk_set(uamtk):
settings.AUTH_UAMTK = uamtk
def _uamtk_get():
return settings.AUTH_UAMTK
def auth_is_login(cookies=None):
"""
Check whether the user is logged in
:param cookies: JSON object
:return: True if logged in, False otherwise
"""
result = TrainAuthAPI().auth_check_login(cookies=cookies)
if not result:
_logger.debug('Session expired, please log in again!')
return result
def auth_reauth(uamtk, cookie_dict):
"""
Re-authenticate with a previously obtained uamtk token
:param uamtk: uamtk token string
:param cookie_dict: session cookies as a dict
:return: JSON object
"""
assert uamtk is not None
assert isinstance(cookie_dict, dict)
train_auth_api = TrainAuthAPI()
uamtk_result = train_auth_api.auth_uamtk(uamtk, cookies=cookie_dict)
_logger.debug('4. auth uamtk result. %s' % json.dumps(uamtk_result, ensure_ascii=False))
uamauth_result = train_auth_api.auth_uamauth(uamtk_result['newapptk'], cookies=cookie_dict)
_logger.debug('5. auth uamauth result. %s' % json.dumps(uamauth_result, ensure_ascii=False))
return uamauth_result
def auth_qr():
"""
Authentication - QR code login
"""
try:
qr_img_path = '/tmp/12306/booking/login-qr-%s.jpeg' % uuid.uuid1().hex
login_html_path = '/tmp/12306/booking/login-qr-%s.html' % uuid.uuid1().hex
train_auth_api = TrainAuthAPI()
_logger.debug('1. auth init')
cookie_dict = train_auth_api.auth_init()
_logger.debug('2. auth get qr')
result = train_auth_api.auth_qr_get(cookies=cookie_dict)
assert isinstance(result, dict)
qr_uuid = result['uuid']
if not os.path.exists(os.path.dirname(qr_img_path)):
os.makedirs(os.path.dirname(qr_img_path))
with open(qr_img_path, 'wb') as f:
f.write(base64.b64decode(result['image']))
with open(login_html_path, 'w+') as f:
f.write(LOGIN_QR_HTML_TPL.format(filepath=qr_img_path))
# open qr image with browser
cmd = 'open %s' % login_html_path
os.system(cmd)
_logger.debug('3. auth check qr')
for _ in range(10):
_logger.info('Please scan the QR code to log in!')
remind_login_qr()
qr_check_result = train_auth_api.auth_qr_check(qr_uuid, cookies=cookie_dict)
_logger.debug('check qr result. %s' % json.dumps(qr_check_result, ensure_ascii=False))
if qr_check_result['result_code'] == "2":
_logger.debug('qr check success result. %s' % json.dumps(qr_check_result, ensure_ascii=False))
_logger.info('QR code scanned successfully!')
break
time.sleep(3)
else:
_logger.error('QR code scan failed, regenerating the QR code')
raise TrainUserNotLogin('QR code scan failed')
_uamtk_set(qr_check_result['uamtk'])
uamauth_result = auth_reauth(_uamtk_get(), cookie_dict)
_logger.info('%s logged in successfully.' % uamauth_result['username'].encode('utf8'))
cookies = {
'tk': uamauth_result['apptk']
}
cookies.update(**cookie_dict)
_logger.debug('cookies. %s' % json.dumps(cookies, ensure_ascii=False,))
# user_info_result = TrainUserAPI().user_info(cookies=cookies)
# _logger.debug('%s login successfully.' % user_info_result['name'])
return cookies
finally:
if os.path.exists(qr_img_path):
os.remove(qr_img_path)
if os.path.exists(login_html_path):
os.remove(login_html_path)
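# Usage sketch (illustrative): auth_qr() opens the QR page in a browser and
# blocks until the code is scanned, then returns session cookies for reuse.
#   cookies = auth_qr()
#   assert auth_is_login(cookies)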
/Labeventtable-0.0.1.tar.gz/Labeventtable-0.0.1/README.md

## Generating Labevent Table using Python
[](https://www.python.org/)
[](https://www.python.org/downloads/release/python-360/)
To install and import the package:
```
pip install Labeventtable
```
```
from Labeventtable import labeventtable
```
To generate the table, follow these steps.

Import these packages:

```
import pandas as pd
import numpy as np
import datetime
from datetime import datetime, timedelta
```
Load these data:

- `labevents_data`
- `propensity_data`
- `labitems_data`
Example
```
table=labeventtable(labevents_data,propensity_data,labitems_data)
table
```
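The call above is assumed to return a pandas DataFrame (the package builds the table with pandas), so the usual DataFrame methods should apply to the result, for example:

```
table.to_csv("labevent_table.csv")
```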
/42di-0.2.6.tar.gz/42di-0.2.6/di/__init__.py

from logging import error
import requests
from requests import status_codes
import swagger_client as sw
import json
import pandas
import warnings
from enum import Enum
#warnings.filterwarnings('default')
SSL = True
TOKEN = None
class ResourceType(Enum):
INVALID = 0
TEAM = 1
PROJECT = 2
DATASET = 3
FILE = 4
class ResourceId():
def __init__(self, uri):
self.endpoint = None
self.team_id = None
self.project_id = None
self.dataset_id = None
self.file_name = None
self._parse_uri(uri)
def _parse_uri(self, uri):
if not uri:
return
s = uri.strip().split('/')
length = len(s)
if length < 1 or length > 5:
raise ValueError("invalid ResourceId: %s" % uri)
if length > 0:
self.endpoint = s[0]
if length > 1:
self.team_id = s[1]
if length > 2:
self.project_id = s[2]
if length > 3:
self.dataset_id = s[3]
if length > 4:
self.file_name = s[4]
def resource_type(self):
if self.file_name:
return ResourceType.FILE
elif self.dataset_id:
return ResourceType.DATASET
elif self.project_id:
return ResourceType.PROJECT
elif self.team_id:
return ResourceType.TEAM
else:
return ResourceType.INVALID
def team_resource_id(self):
if self.endpoint and self.team_id:
return '/'.join([self.endpoint, self.team_id])
else:
return None
def project_resource_id(self):
if self.endpoint and self.team_id and self.project_id:
return '/'.join([self.endpoint, self.team_id, self.project_id])
else:
return None
def dataset_resource_id(self):
if self.endpoint and self.team_id and self.project_id and self.dataset_id:
return '/'.join([self.endpoint, self.team_id, self.project_id, self.dataset_id])
else:
return None
def file_resource_id(self):
if self.endpoint and self.team_id and self.project_id and self.dataset_id and self.file_name:
return '/'.join([self.endpoint, self.team_id, self.project_id, self.dataset_id, self.file_name])
else:
return None
def __str__(self):
resource_type = self.resource_type()
if resource_type == ResourceType.TEAM:
return self.team_resource_id()
elif resource_type == ResourceType.PROJECT:
return self.project_resource_id()
elif resource_type == ResourceType.DATASET:
return self.dataset_resource_id()
elif resource_type == ResourceType.FILE:
return self.file_resource_id()
else:
return None
def str(self):
return self.__str__()
def _create_sw_client(host, token):
cfg = sw.Configuration()
#cfg.api_key["token"] = token
scheme = "https://" if SSL else "http://"
cfg.host = scheme + host + "/api/v1"
return sw.ApiClient(cfg, "Authorization", "Bearer " + token)
class Project():
def __init__(self, project, access_token=None):
global SSL
global TOKEN
ri = ResourceId(project)
if not ri.project_resource_id():
raise ValueError("invalid project resource identity: %s" % project)
self.host = ri.endpoint
self.team_id = ri.team_id
self.project_id = ri.project_id
self.access_token = access_token
if not self.access_token:
self.access_token = TOKEN
if not self.access_token:
raise ValueError("access token required.")
self.sw_client = _create_sw_client(self.host, self.access_token)
def dataset(self, dataset_id):
return Dataset(self.team_id, self.project_id, dataset_id, self.sw_client, self.access_token)
def list_datasets(self):
return self.dataset(None).list()
# Deprecated
def table(self, table_name):
warnings.warn("Deprecated, use dataset instead.", DeprecationWarning)
return self.dataset(table_name)
def list_tables(self):
warnings.warn("Deprecated, use list_datasets instead.", DeprecationWarning)
return self.list_datasets()
class Dataset():
def __init__(self, team_id, project_id, dataset_id, sw_client, token=None):
self.team_id = team_id
self.project_id = project_id
self.dataset_id = dataset_id
self.sw_client = sw_client
self.api = sw.DatasetsApi(self.sw_client)
self.token = token
def list(self):
try:
return self.api.list_datasets(self.team_id, self.project_id)
except sw.rest.ApiException as e:
_err_format(e)
def get(self, get_schema=False):
t = None
try:
t = self.api.get_dataset(self.team_id, self.project_id, self.dataset_id, schema=get_schema)
except sw.rest.ApiException as e:
if e.status != 404:
_err_format(e)
return t
def exists(self):
return self.get() is not None
def create(self):
try:
self.api.update_dataset(self.team_id, self.project_id, self.dataset_id)
except sw.rest.ApiException as e:
_err_format(e)
def delete(self):
try:
self.api.delete_dataset(self.team_id, self.project_id, self.dataset_id)
except sw.rest.ApiException as e:
_err_format(e)
def update(self, prop, value):
action = sw.PatchAction(action="UPDATE", _property=prop, value=value)
try:
self.api.update_dataset_property(self.team_id, self.project_id, self.dataset_id, body=action)
except sw.rest.ApiException as e:
_err_format(e)
def update_schema(self, schema):
self.update("schema", json.dumps(schema));
def put(self, data, file_name=None, tag=None, content_type=None):
if file_name is None or file_name.strip() == "":
file_name = "0"
if tag is None:
tag = ""
if content_type is None or content_type.strip() == "":
content_type = ""
try:
self.api.put_dataset_data_file(self.team_id, self.project_id, self.dataset_id, file_name , x_di_tag=tag, content_type=content_type, body=data)
except sw.rest.ApiException as e:
_err_format(e)
def put_csv(self, df, file_name=None, tag=None):
body = df.to_csv()
self.put(body, file_name, tag, "text/csv")
def put_parquet(self, df, file_name=None, tag=None):
body = df.to_parquet()
self.put(body, file_name, tag, "application/parquet")
def files(self):
try:
return self.api.list_dataset_data_files(self.team_id, self.project_id, self.dataset_id)
except sw.rest.ApiException as e:
_err_format(e)
def get_file_meta(self, data_file_name):
try:
return self.api.get_dataset_data_file_meta(self.team_id, self.project_id, self.dataset_id, data_file_name)
except sw.rest.ApiException as e:
_err_format(e)
def _get_file_url(self, file_name):
url = '/'.join([self.sw_client.configuration.host, "teams", self.team_id, "projects", self.project_id, "datasets", self.dataset_id, "data", file_name])
if self.token:
url += "?token=" + self.token
r = requests.get(url, allow_redirects=False)
if r.status_code == 303:
return r.headers.get('Location')
else:
raise BaseException("Get DataFile content failed: %s" % str(r.status_code))
def _read_df(self, file_name, format):
url = self._get_file_url(file_name)
if url and format == "text/csv":
return pandas.read_csv(url)
elif format == "application/parquet":
return pandas.read_parquet(url)
else:
raise BaseException("File format unsupported.")
def read(self, file_name=[]):
filters = set(file_name)
dfs = []
files = self.files()
for f in files:
if len(file_name) > 0 and f.name not in filters:
continue
df = self._read_df(f.name, f.content_type)
dfs.append(df)
return None if len(dfs) == 0 else pandas.concat(dfs)
class Table(Dataset):
    def __init__(self, team_id, project_id, table_name, sw_client, token=None):
        Dataset.__init__(self, team_id, project_id, table_name, sw_client, token)
        warnings.warn("Deprecated, use Dataset instead.", DeprecationWarning)
def schema(df):
columns = []
for name in df.index.names:
columns.append({
"name": name,
"data_type": str(df.index.dtype),
"key": "index"
})
for index, value in df.dtypes.items():
columns.append({
"name": index,
"data_type": str(value)
})
return {"columns": columns}
def dataset(identity, token=None):
ri = ResourceId(str(identity))
if ri.resource_type() != ResourceType.DATASET:
raise ValueError("invalid resource id: %s" % identity)
return Project(ri.project_resource_id(), access_token=token).dataset(ri.dataset_id)
def put(identity, data, token=None, content_type='application/parquet', create=True, update_schema=False):
ri = ResourceId(str(identity))
if ri.resource_type() != ResourceType.DATASET and ri.resource_type() != ResourceType.FILE:
raise ValueError("invalid resource id: %s" % identity)
project = Project(ri.project_resource_id(), token)
dataset = project.dataset(ri.dataset_id)
if not dataset.exists() and create:
dataset.create()
if content_type == 'text/csv':
dataset.put_csv(data, ri.file_name)
elif content_type == 'application/parquet':
dataset.put_parquet(data, ri.file_name)
else:
dataset.put(data, file_name=ri.file_name, tag=None, content_type=content_type)
if update_schema:
dataset.update_schema(schema(data))
def read(identity, token=None):
ri = ResourceId(str(identity))
if ri.resource_type() != ResourceType.DATASET and ri.resource_type() != ResourceType.FILE:
raise ValueError("invalid resource id: %s" % identity)
project = Project(ri.project_resource_id(), token)
files = []
if ri.file_name:
files.append(ri.file_name)
return project.dataset(ri.dataset_id).read(file_name=files)
class DIException(Exception):
def __init__(self, status, code, msg):
self.status = status
self.code = code
self.msg = msg
Exception.__init__(self, self.status, "HTTP Status: %s, Code: %s, Message: %s" % (self.status, self.code, self.msg))
def _err_format(e):
err = {}
try:
err = json.loads(e.body)
except json.decoder.JSONDecodeError as je:
err = {"code": "JSONDecodeError", "message": je}
raise DIException(e.status, err["code"], err["message"]) from None
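# Usage sketch (illustrative; the endpoint, team, project, dataset and token
# below are hypothetical placeholders):
#   import di
#   di.TOKEN = "<access-token>"
#   df = di.read("api.example.com/my-team/my-project/my-dataset")
#   di.put("api.example.com/my-team/my-project/my-dataset/part-0", df)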
/BioFlow-0.2.3.tar.gz/BioFlow-0.2.3/bioflow/annotation_network/BioKnowledgeInterface.py

import hashlib
import json
import pickle
import random
import string
import math
from collections import defaultdict
from copy import copy
from csv import reader
from itertools import combinations, chain
from pprint import PrettyPrinter
from random import shuffle
from time import time
import traceback, sys
import numpy as np
from scipy.sparse import lil_matrix
from scipy.sparse.csgraph import shortest_path
from bioflow.algorithms_bank import conduction_routines as cr
from bioflow.main_configs import Dumps, Outputs, annotome_rand_samp
from bioflow.molecular_network.InteractomeInterface import InteractomeInterface
from bioflow.neo4j_db.GraphDeclarator import DatabaseGraph
from bioflow.neo4j_db.db_io_routines import get_db_id
from bioflow.utils.gdfExportInterface import GdfExportInterface
from bioflow.utils.io_routines import dump_object, undump_object, get_background_bulbs_ids
from bioflow.utils.log_behavior import get_logger
log = get_logger(__name__)
def _characterise(_object):
print 'Object of size %s and type %s' % (len(_object), type(_object))
def _characterise_mat(matrix):
print 'Matrix of shape %s, type %s and has %s non-zero terms, min is %s, max is %s' % \
(matrix.shape, type(matrix), len(matrix.nonzero()[0]), '<>', '<>')
class GeneOntologyInterface(object):
"""
General class to recover all the information associated with GO from database and buffer
them for further use.
:param namespace_filter: list of GO namespaces to retain, e.g. ('biological_process',)
:param uniprot_node_ids: a list of the reached uniprot neo4j ids that will be used
for the GO reach and informativity computation. Beyond database and formalism issues,
this allows adapting the method when only a limited list of UPs is of interest
:param correction_factor: (multiplier, exponent) pair applied in the informativity
computation
:param ultraspec_clean: if True, ultra-specific GO terms are leveled out
:param ultraspec_lvl: minimal number of uniprots that have to point to a GO term for
it not to be considered ultra-specific anymore
"""
_GOUpTypes = ["is_a_go", "is_part_of_go"]
_GORegTypes = ["is_Regulant"]
def __init__(self, namespace_filter=('biological_process',),
uniprot_node_ids=None,
correction_factor=(1, 1),
ultraspec_clean=True, ultraspec_lvl=3):
self.interactome_interface_instance = InteractomeInterface(True, True)
self.interactome_interface_instance.fast_load()
init_set = self.interactome_interface_instance.all_uniprots_neo4j_id_list
if uniprot_node_ids:
init_set = list(set(init_set).intersection(set(uniprot_node_ids)))
self.go_namespace_filter = list(namespace_filter)
self.InitSet = init_set
self.correction_factor = correction_factor
self.ultraspec_cleaned = ultraspec_clean
self.ultraspec_lvl = ultraspec_lvl
self.init_time = time()
self.partial_time = time()
self.UPs_without_GO = set()
self.UP2GO_Dict = {}
self.GO2UP = defaultdict(list)
self.SeedSet = set()
self.All_GOs = []
self.GO2Num = {}
self.Num2GO = {}
self.total_Entropy = None
self.Reachable_nodes_dict = {}
self.UP2GO_Reachable_nodes = {}
self.GO2UP_Reachable_nodes = {}
self.UP2GO_step_Reachable_nodes = {}
self.GO2UP_step_Reachable_nodes = {}
self.GO2_Pure_Inf = {}
self.GO2_Weighted_Ent = {}
self.GO_Names = {}
self.GO_Legacy_IDs = {}
self.rev_GO_IDs = {}
self.UP_Names = {}
self.adjacency_matrix = np.zeros((2, 2))
self.dir_adj_matrix = np.zeros((2, 2))
self.laplacian_matrix = np.zeros((2, 2))
self.weighted_laplacian_matrix = np.zeros((2, 2))
self.Sign_retaining_matrix = np.zeros((2, 2))
self.TimesReached = {}
self.accelerationDict = {}
self.Reverse_Dict = {}
self.GO_Node_ID2Reach = {}
# everything below should not be dumped, but exported to the mongoDB
self.inflated_Laplacian = np.zeros((2, 2))
self.inflated_idx2lbl = {}
self.inflated_lbl2idx = {}
self.binding_intensity = 0
self.analytic_uniprots = []
self.UP2UP_voltages = {}
self.uniprots_2_voltage_and_circulation = {} # can be safely renamed
self.current_accumulator = np.zeros((2, 2))
self.node_current = {}
self.call_coutner = 0
char_set = string.ascii_uppercase + string.digits
self.random_tag = ''.join(random.sample(char_set * 6, 6))
self.Indep_Lapl = np.zeros((2, 2))
self.uncomplete_compute = False
self.main_set = self.InitSet
log.info('Setting up GO Interface with namespaces %s and %s root UPs',
self.go_namespace_filter, len(self.InitSet))
def pretty_time(self):
"""
Times the execution
:return: the time since the creation of the object and since the last call of
this function, formatted as a string
:rtype: str
"""
it, pt = (round(time() - self.init_time),
round(time() - self.partial_time))
pload = 'total: %s m %s s, \t partial: %s m %s s' % (
int(it) / 60, it % 60, int(pt) / 60, pt % 60)
self.partial_time = time()
return pload
def _time(self):
pt = time() - self.partial_time
return pt
def dump_statics(self):
dump_object(
Dumps.GO_builder_stat,
(self.go_namespace_filter,
self.InitSet,
self.correction_factor,
self.ultraspec_cleaned,
self.ultraspec_lvl))
@staticmethod
def undump_statics():
return undump_object(Dumps.GO_builder_stat)
def dump_core(self):
dump_object(
Dumps.GO_dump,
(self.UP2GO_Dict,
self.GO2UP,
self.SeedSet,
self.Reachable_nodes_dict,
self.GO_Names,
self.GO_Legacy_IDs,
self.rev_GO_IDs,
self.All_GOs,
self.GO2Num,
self.Num2GO,
self.UP_Names,
self.UPs_without_GO))
def undump_core(self):
self.UP2GO_Dict, self.GO2UP, self.SeedSet, self.Reachable_nodes_dict,\
self.GO_Names, self.GO_Legacy_IDs, self.rev_GO_IDs, self.All_GOs,\
self.GO2Num, self.Num2GO, self.UP_Names, self.UPs_without_GO =\
undump_object(Dumps.GO_dump)
def dump_matrices(self):
dump_object(
Dumps.GO_Mats,
(self.adjacency_matrix,
self.dir_adj_matrix,
self.laplacian_matrix))
def undump_matrices(self):
self.adjacency_matrix, self.dir_adj_matrix, self.laplacian_matrix = undump_object(
Dumps.GO_Mats)
def dump_informativities(self):
dump_object(
Dumps.GO_Infos,
(self.UP2GO_Reachable_nodes,
self.GO2UP_Reachable_nodes,
self.UP2GO_step_Reachable_nodes,
self.GO2UP_step_Reachable_nodes,
self.GO2_Pure_Inf,
self.GO2_Weighted_Ent))
def undump_informativities(self):
self.UP2GO_Reachable_nodes, self.GO2UP_Reachable_nodes, self.UP2GO_step_Reachable_nodes, \
self.GO2UP_step_Reachable_nodes, self.GO2_Pure_Inf, self.GO2_Weighted_Ent = \
undump_object(Dumps.GO_Infos)
def dump_inflated_elements(self):
dump_object(
Dumps.GO_Inflated,
(self.inflated_Laplacian,
self.inflated_idx2lbl,
self.inflated_lbl2idx,
self.binding_intensity))
def undump_inflated_elements(self):
self.inflated_Laplacian, self.inflated_idx2lbl, \
self.inflated_lbl2idx, self.binding_intensity = \
undump_object(Dumps.GO_Inflated)
def dump_memoized(self):
md5 = hashlib.md5(
json.dumps(
sorted(
self.analytic_uniprots),
sort_keys=True)).hexdigest()
payload = {
'UP_hash': md5, 'sys_hash': self.md5_hash(), 'size': len(
self.analytic_uniprots), 'UPs': pickle.dumps(
self.analytic_uniprots), 'currents': pickle.dumps(
(self.current_accumulator, self.node_current)), 'voltages': pickle.dumps(
self.uniprots_2_voltage_and_circulation)}
dump_object(Dumps.GO_Analysis_memoized, payload)
@staticmethod
def undump_memoized():
"""
:return: undumped memoized analysis
"""
return undump_object(Dumps.GO_Analysis_memoized)
def dump_independent_linear_sets(self):
dump_object(Dumps.GO_Indep_Linset, self.Indep_Lapl)
def undump_independent_linear_sets(self):
self.Indep_Lapl = undump_object(Dumps.GO_Indep_Linset)
def full_rebuild(self):
self.get_gene_ontology_access()
self.get_gene_ontology_structure()
self.get_go_adjacency_and_laplacian()
self.get_go_reach()
if self.ultraspec_cleaned:
self.filter_out_too_specific()
self.get_laplacians()
self.inflate_matrix_and_indexes()
self.dump_statics()
self.dump_core()
self.dump_matrices()
self.dump_informativities()
self.dump_inflated_elements()
log.info('Finished rebuilding the GO Interface object %s', self.pretty_time())
def load(self):
"""
loads itself from the saved dumps, in case the Filtering system is the same
"""
namespace_filter, initial_set, correction_factor, ultraspec_cleaned, ultraspec_lvl = \
self.undump_statics()
if self.go_namespace_filter != namespace_filter:
log.critical("Wrong Filtering attempted to be recovered from storage")
raise Exception(
"Wrong Filtering attempted to be recovered from storage")
if self.InitSet != initial_set:
print len(self.InitSet)
print len(initial_set)
traceback.print_stack()
log.critical("Wrong initial_set attempted to be recovered from storage")
raise Exception(
"Wrong initial_set attempted to be recovered from storage")
if self.correction_factor != correction_factor:
log.critical("Wrong correction factor attempted to be recovered from storage")
raise Exception(
"Wrong correction factor attempted to be recovered from storage")
if self.ultraspec_cleaned != ultraspec_cleaned:
log.critical(
"Ultraspecific terms leveling state is not the same in the database as requested")
raise Exception(
"Ultraspecific terms leveling state is not the same in the database as requested")
if self.ultraspec_lvl != ultraspec_lvl:
log.critical(
"Ultraspecific terms leveling cut-off is not the same in the database as requested")
raise Exception(
"Ultraspecific terms leveling cut-off is not the same in the database as requested")
self.undump_core()
self.undump_matrices()
self.undump_informativities()
self.undump_inflated_elements()
def get_gene_ontology_access(self):
"""
Loads all of the relations between the UNIPROTs and GOs as one giant dictionary
"""
uniprots_without_gene_ontology_terms = 0
log.info('Starting GO matrix mapping starting from %s uniprots', len(self.InitSet))
for uniprot_neo4j_id in self.InitSet:
uniprot_specific_gos = []
up_node = DatabaseGraph.get(uniprot_neo4j_id)
self.UP_Names[uniprot_neo4j_id] = [up_node.properties['legacyId'],
up_node.properties['displayName']]
attached_go_nodes = DatabaseGraph.get_linked(uniprot_neo4j_id,
link_type='is_go_annotation')
for go_node in attached_go_nodes:
if go_node.properties['Namespace'] in self.go_namespace_filter:
go_node_neo4j_id = get_db_id(go_node)
uniprot_specific_gos.append(go_node_neo4j_id)
self.GO2UP[go_node_neo4j_id].append(uniprot_neo4j_id)
self.SeedSet.add(go_node_neo4j_id)
if not uniprot_specific_gos:
uniprots_without_gene_ontology_terms += 1
log.debug("UP without GO was found. UP bulbs_id: %s, \t name: %s",
uniprot_neo4j_id, self.UP_Names[uniprot_neo4j_id])
self.UPs_without_GO.add(uniprot_neo4j_id)
else:
self.UP2GO_Dict[uniprot_neo4j_id] = copy(uniprot_specific_gos)
log.info('total number of UPs without a go_node annotation: %s out of %s',
uniprots_without_gene_ontology_terms, len(self.InitSet))
# TODO: REFACTORING. Method is excessively complex.
def get_gene_ontology_structure(self):
"""
Loads all of the relations between the GOs that are generalisations of the seedList
GOs and that are within the types specified in go_namespace_filter
"""
visited_set = set()
seeds_list = copy(list(self.SeedSet))
log.info('Starting gene ontology structure retrieval from the set of %s seeds',
len(self.SeedSet))
while seeds_list:
node_id = seeds_list.pop()
visited_set.add(node_id)
local_uniprot_list = []
local_regulation_list = []
local_up_regulation_list = []
local_down_regulation_list = []
gene_ontology_node = DatabaseGraph.get(node_id, 'GOTerm')
self.GO_Names[node_id] = str(gene_ontology_node.properties['displayName'])
self.GO_Legacy_IDs[node_id] = str(gene_ontology_node.properties['legacyId'])
self.rev_GO_IDs[gene_ontology_node.properties['legacyId']] = node_id
for relation_type in chain(self._GOUpTypes, self._GORegTypes):
related_go_nodes = DatabaseGraph.get_linked(node_id, 'out', relation_type)
if not related_go_nodes:
continue # skip in case GO Node has no outgoing relations to other GO nodes
for go_node in related_go_nodes:
if go_node.properties['Namespace'] not in self.go_namespace_filter:
continue
node_bulbs_id = get_db_id(go_node)
if node_bulbs_id not in visited_set:
seeds_list.append(node_bulbs_id)
if relation_type in self._GOUpTypes:
local_uniprot_list.append(node_bulbs_id)
else:
local_regulation_list.append(node_bulbs_id)
rev_generator = DatabaseGraph.get_linked(node_id, 'in', relation_type)
if not rev_generator:
continue
for go_node in rev_generator:
if go_node.properties['Namespace'] not in self.go_namespace_filter:
continue
node_bulbs_id = get_db_id(go_node)
if relation_type in self._GOUpTypes:
local_down_regulation_list.append(node_bulbs_id)
else:
local_up_regulation_list.append(node_bulbs_id)
self.Reachable_nodes_dict[node_id] = (
list(set(local_uniprot_list)),
list(set(local_regulation_list)),
list(set(local_down_regulation_list)),
list(set(local_up_regulation_list)))
self.All_GOs = list(visited_set)
self.Num2GO = dict((i, val) for i, val in enumerate(self.All_GOs))
self.GO2Num = dict((val, i) for i, val in enumerate(self.All_GOs))
def get_go_adjacency_and_laplacian(self, include_reg=True):
"""
        Builds undirected and directed adjacency matrices for the GO set.
        :param include_reg: if True, the regulation set is included into the matrix
        :warning: if the parameter above is set to False, the get_go_reach method will be
        unable to function.
"""
def build_adjacency():
"""
Builds undirected adjacency matrix for the GO transitions
"""
base_matrix = lil_matrix((len(self.All_GOs), len(self.All_GOs)))
for node, package in self.Reachable_nodes_dict.iteritems():
fw_nodes = package[0]
if include_reg:
fw_nodes += package[1]
for node2 in fw_nodes:
idx = (self.GO2Num[node], self.GO2Num[node2])
base_matrix[idx] = 1
idx = (idx[1], idx[0])
base_matrix[idx] = 1
self.adjacency_matrix = copy(base_matrix)
def build_dir_adj():
"""
Builds directed adjacency matrix for the GO transitions
"""
base_matrix = lil_matrix((len(self.All_GOs), len(self.All_GOs)))
for node, package in self.Reachable_nodes_dict.iteritems():
fw_nodes = package[0]
if include_reg:
fw_nodes += package[1]
for node2 in fw_nodes:
idx = (self.GO2Num[node], self.GO2Num[node2])
base_matrix[idx] = 1
self.dir_adj_matrix = copy(base_matrix)
build_adjacency()
build_dir_adj()
def calculate_informativity(self, number):
"""
        Returns the entropy of a set of equiprobable events, where number is the count of events.
:param number:
"""
if number < 1.0:
log.critical("Wrong value provided for entropy computation")
raise Exception("Wrong value provided for entropy computation")
if not self.total_Entropy:
self.total_Entropy = - \
math.log(1 / float(len(self.UP2GO_Dict.keys())), 2)
if number == 1.0:
return 2 * self.total_Entropy
return pow(-self.correction_factor[0] * self.total_Entropy /
math.log(1 / float(number), 2), self.correction_factor[1])
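        # Illustration (a sketch, not part of the original code): with 1000
        # annotated UniProts, total_Entropy = -log2(1/1000) ~ 9.97 bits, and a
        # term reached by 10 UniProts scores
        # pow(-c0 * 9.97 / log2(1/10), c1) = pow(c0 * 3.0, c1),
        # where (c0, c1) = self.correction_factor.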
# TODO: REFACTORING. Method is excessively complex.
def get_go_reach(self):
"""
Recovers by how many different uniprots each GO term is reached, both in
distance-agnostic and distance-specific terms.
"""
def verify_equivalence_of_reaches(step_reach, reach):
"""
:param step_reach:
:param reach:
:raise Exception:
"""
dict_len = {key: [len(val), len(step_reach[key].keys())]
for key, val in reach.iteritems()}
for key, val in dict_len.iteritems():
if val[1] != val[0]:
log.critical(
'Reach exploration results not equivalent! Please report the error.')
raise Exception(
'Reach exploration results not equivalent! Please report the error.')
def special_sum(_val_dict, filter_function=lambda x: x + 1.0):
"""
Special sum used for the computation of staged informativity of different terms
:param _val_dict:
:param filter_function:
:raise Exception:
"""
summer = 0
for key, val_list in _val_dict.iteritems():
summer += filter_function(key) * len(val_list)
return summer
dir_reg_path = shortest_path(
self.dir_adj_matrix, directed=True, method='D')
dir_reg_path[np.isinf(dir_reg_path)] = 0.0
dir_reg_path = lil_matrix(dir_reg_path)
self.GO2UP_Reachable_nodes = dict(
(el, []) for el in self.Reachable_nodes_dict.keys())
self.GO2UP_Reachable_nodes.update(self.GO2UP)
pre_go2up_step_reachable_nodes = \
dict((key, dict((v, 0) for v in val))
for key, val in self.GO2UP_Reachable_nodes.iteritems())
        # when called on possibly un-encountered items, anticipate a default
        # value of 100 000
# Now just scan vertical columns and add UP terms attached
for idx1, idx2 in zip(list(dir_reg_path.nonzero()[0]),
list(dir_reg_path.nonzero()[1])):
self.GO2UP_Reachable_nodes[self.Num2GO[idx2]] +=\
self.GO2UP_Reachable_nodes[self.Num2GO[idx1]]
if dir_reg_path[idx1, idx2] < 1.0:
log.critical("null in non-null patch")
raise Exception("null in non-null patch")
            step_reach_upgrade = dict(
                (key, val + dir_reg_path[idx1, idx2])
                for key, val in pre_go2up_step_reachable_nodes[self.Num2GO[idx1]].iteritems())
            for k, v in step_reach_upgrade.iteritems():
                pre_go2up_step_reachable_nodes[self.Num2GO[idx2]][k] = min(
                    pre_go2up_step_reachable_nodes[self.Num2GO[idx2]].setdefault(k, 100000), v)
for key, val in self.GO2UP_Reachable_nodes.iteritems():
self.GO2UP_Reachable_nodes[key] = list(set(val))
verify_equivalence_of_reaches(
pre_go2up_step_reachable_nodes,
self.GO2UP_Reachable_nodes)
# Now we need to invert the reach to get the set of all the primary and
# derived GO terms that describe a UP
self.UP2GO_Reachable_nodes = dict(
(key, []) for key in self.UP2GO_Dict.keys())
self.UP2GO_step_Reachable_nodes = dict(
(key, defaultdict(list)) for key in self.UP2GO_Dict.keys())
self.GO2UP_step_Reachable_nodes = dict(
(key, defaultdict(list)) for key in pre_go2up_step_reachable_nodes.keys())
for key, val_dict in pre_go2up_step_reachable_nodes.iteritems():
for k, v in val_dict.iteritems():
self.GO2UP_step_Reachable_nodes[key][v].append(k)
self.UP2GO_step_Reachable_nodes[k][v].append(key)
self.UP2GO_Reachable_nodes[k].append(key)
# and finally we compute the pure and weighted informativity for each
# term
self.GO2_Pure_Inf = dict((key, self.calculate_informativity(len(val)))
for key, val in self.GO2UP_Reachable_nodes.iteritems())
        self.GO2_Weighted_Ent = dict(
            (key, self.calculate_informativity(special_sum(val_dict)))
            for key, val_dict in self.GO2UP_step_Reachable_nodes.iteritems())
def get_laplacians(self):
"""
        Recovers the Laplacian (information conductance) matrices for the GO annotation terms.
        For the weighted Laplacian, currently implements a Max-Ent with a custom factor as the
        transition price.
        :warning: for this method to function, the get_go_reach method must be run first.
        :warning: accounting for the regulatory relations between the GO terms is performed only
        if it was done in the adjacency matrix computation
"""
base_matrix = -copy(self.dir_adj_matrix)
nz_list = copy(
zip(list(base_matrix.nonzero()[0]), list(base_matrix.nonzero()[1])))
for idx1, idx2 in nz_list:
            min_inf = min(
                self.GO2_Pure_Inf[self.Num2GO[idx1]],
                self.GO2_Pure_Inf[self.Num2GO[idx2]])
base_matrix[idx1, idx2] = -min_inf
base_matrix[idx2, idx1] = -min_inf
base_matrix[idx2, idx2] += min_inf
base_matrix[idx1, idx1] += min_inf
self.laplacian_matrix = base_matrix
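        # Illustration (a sketch, not part of the original code): for an edge
        # (i, j) with informativities 3.2 and 5.0, min_inf = 3.2, so
        # L[i, j] = L[j, i] = -3.2 and both diagonal entries gain +3.2, which,
        # assuming each edge appears in one direction only in dir_adj_matrix,
        # keeps every row of the Laplacian summing to zero.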
def compute_uniprot_dict(self):
"""
        Computes the uniprot dictionary required by some other methods
:return:
"""
uniprot_dict = {}
for elt in self.interactome_interface_instance.reached_uniprots_neo4j_id_list:
node = DatabaseGraph.get(elt, 'UNIPROT')
alt_id = node.properties['legacyId']
uniprot_dict[alt_id] = (
elt, self.interactome_interface_instance.neo4j_id_2_display_name[elt])
uniprot_dict[elt] = alt_id
        pickle.dump(uniprot_dict, open(Dumps.Up_dict_dump, 'w'))
return uniprot_dict
def filter_out_too_specific(self):
"""
        Filters out GO terms that are too specific and builds directed and undirected adjacency
        maps and a Laplacian.
"""
rep_val = self.calculate_informativity(self.ultraspec_lvl)
self.ultraspec_cleaned = True
ultraspec_go_terms = list(GO
for GO, reach
in self.GO2UP_Reachable_nodes.iteritems()
if len(reach) < self.ultraspec_lvl)
for GO in ultraspec_go_terms:
self.GO2_Pure_Inf[GO] = rep_val
def md5_hash(self):
"""
        Return the MD5 hash of self to ensure that all the defining properties have been
correctly defined before dump/retrieval
"""
sorted_initset = sorted(self.InitSet)
data = [
self.go_namespace_filter,
sorted_initset,
self.correction_factor,
self.ultraspec_cleaned,
self.ultraspec_lvl]
md5 = hashlib.md5(json.dumps(data, sort_keys=True)).hexdigest()
return str(md5)
def inflate_matrix_and_indexes(self):
"""
Performs the laplacian matrix inflation to incorporate the uniprots on which we
        will be running the analysis.
"""
# branching distribution: at least 10x the biggest conductivity of the
# system, unless too specific, in which case ~ specific level
self.binding_intensity = 10 * self.calculate_informativity(self.ultraspec_lvl)
fixed_index = self.laplacian_matrix.shape[0]
self_connectable_uniprots = list(
set(self.InitSet) - set(self.UPs_without_GO))
up2idxs = dict((UP, fixed_index + Idx)
for Idx, UP in enumerate(self_connectable_uniprots))
idx2ups = dict((Idx, UP) for UP, Idx in up2idxs.iteritems())
self.inflated_Laplacian = lil_matrix(
(self.laplacian_matrix.shape[0] + len(self_connectable_uniprots),
self.laplacian_matrix.shape[1] + len(self_connectable_uniprots)))
self.inflated_Laplacian[:self.laplacian_matrix.shape[0], :self.laplacian_matrix.shape[1]] =\
self.laplacian_matrix
for uniprot in self_connectable_uniprots:
for go_term in self.UP2GO_Dict[uniprot]:
self.inflated_Laplacian[
up2idxs[uniprot], up2idxs[uniprot]] += self.binding_intensity
self.inflated_Laplacian[
self.GO2Num[go_term], self.GO2Num[go_term]] += self.binding_intensity
self.inflated_Laplacian[
self.GO2Num[go_term], up2idxs[uniprot]] -= self.binding_intensity
self.inflated_Laplacian[
up2idxs[uniprot], self.GO2Num[go_term]] -= self.binding_intensity
self.inflated_lbl2idx = copy(self.GO2Num)
self.inflated_lbl2idx.update(up2idxs)
self.inflated_idx2lbl = copy(self.Num2GO)
self.inflated_idx2lbl.update(idx2ups)
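        # Illustration (a sketch with made-up sizes): with 5000 GO terms and 800
        # connectable UniProts, the inflated Laplacian is 5800 x 5800; the k-th
        # connectable UniProt gets matrix index 5000 + k and is tied to each of
        # its GO terms with a conductance of self.binding_intensity.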
def set_uniprot_source(self, uniprots):
"""
Sets the reached_uniprots_neo4j_id_list on which the circulation computation routines
        will be performed by the other methods. Avoids passing large lists of parameters as arguments.
:param uniprots: List of node IDs of the uniprots on which we would like to perform
current computations
:raise Warning: if the uniprots were not present in the set of GOs for which we
built the system or had no GO attached to them
"""
if not set(uniprots) <= set(self.UP2GO_Dict.keys()):
na_set = set(uniprots) - set(self.UP2GO_Dict.keys())
            log.warning('%s uniprots out of %s either were not present in the construction set '
                        'or have no GO terms attached to them.', len(na_set), len(set(uniprots)))
log.debug('full list of uniprots that cannot be analyzed: \n%s', na_set)
self.analytic_uniprots = [
uniprot for uniprot in uniprots if uniprot in self.UP2GO_Dict.keys()]
def build_extended_conduction_system(
self,
memoized=True,
sourced=False,
incremental=False,
cancellation=True,
sparse_samples=False):
"""
Builds a conduction matrix that integrates uniprots, in order to allow an easier
knowledge flow analysis
:param memoized: if the tensions and individual relation matrices should be stored in
        the matrix and dumped at the end of the computation (required for submatrix re-computation)
:param sourced: if true, all the relations will be looked up and not computed. Useful
for the retrieval of sub-circulation group, but requires the
uniprots_2_voltage_and_circulation to be pre-filled
:param incremental: if True, all the circulation computation will be added to the
existing ones. Useful for the computation of particularly big systems with
intermediate dumps
:param cancellation: divides the final current by #Nodes**2/2, i.e. makes the currents
comparable between circulation systems of different sizes.
        :param sparse_samples: if set to an integer, the sampling will be sparse and not dense,
        i.e. instead of computing for each node pair, only an estimation will be made, equal to
        computing sparse_samples associations with other randomly chosen nodes
:type sparse_samples: int
:return: adjusted conduction system
"""
        # np.array_equal avoids the ambiguous truth value of an element-wise comparison
        if not incremental or np.array_equal(self.current_accumulator, np.zeros((2, 2))):
self.current_accumulator = lil_matrix(self.inflated_Laplacian.shape)
self.UP2UP_voltages = {}
if not sourced:
self.uniprots_2_voltage_and_circulation = {}
iterator = []
if sparse_samples:
            for _ in range(0, sparse_samples):
                shuffled_ups = copy(self.analytic_uniprots)
                random.shuffle(shuffled_ups)
                iterator += zip(shuffled_ups[:len(shuffled_ups) / 2],
                                shuffled_ups[len(shuffled_ups) / 2:])
self.uncomplete_compute = True
else:
iterator = combinations(self.analytic_uniprots, 2)
            iterator = list(iterator)
total_pairs = len(iterator)
breakpoints = 300
previous_time = time()
for counter, (UP1, UP2) in enumerate(iterator):
if sourced:
self.current_accumulator = self.current_accumulator + \
cr.sparse_abs(self.uniprots_2_voltage_and_circulation[
tuple(sorted((UP1, UP2)))][1])
continue
idx1, idx2 = (self.inflated_lbl2idx[UP1], self.inflated_lbl2idx[UP2])
pre_reach = self.UP2GO_Reachable_nodes[UP1] + \
self.UP2GO_Reachable_nodes[UP2] + [UP1] + [UP2]
reach = [self.inflated_lbl2idx[label] for label in pre_reach]
current_upper, voltage_diff = cr.group_edge_current_with_limitations(
inflated_laplacian=self.inflated_Laplacian,
idx_pair=(idx1, idx2),
reach_limiter=reach)
self.current_accumulator = self.current_accumulator +\
cr.sparse_abs(current_upper)
self.UP2UP_voltages[(UP1, UP2)] = voltage_diff
if memoized:
self.uniprots_2_voltage_and_circulation[
tuple(sorted((UP1, UP2)))] = \
(voltage_diff, current_upper)
if counter % breakpoints == 0 and counter > 1:
compops = float(breakpoints) / (time() - previous_time)
log.info("progress: %s/%s, current speed: %s compops, time remaining: %s min"
% (counter, total_pairs, compops, (total_pairs - counter) / compops / 60))
previous_time = time()
        if cancellation:  # TODO: factor that one into the Conduction Routines
ln = len(self.analytic_uniprots)
self.current_accumulator /= (ln * (ln - 1) / 2)
if memoized:
self.dump_memoized()
index_current = cr.get_current_through_nodes(self.current_accumulator)
        self.node_current = dict(
            (self.inflated_idx2lbl[idx], val)
            for idx, val in enumerate(index_current))
def format_node_props(self, node_current, limit=0.01):
"""
        Formats the nodes for the analysis in the knowledge_access_analysis module
        :param node_current: Current through the GO nodes
        :param limit: hard limit used to filter out the GO terms with too little current
        (compensates for the minor currents in the grid)
:return: {GO:[node current, pure GO informativity, Number of reachable nodes]}
"""
char_dict = {}
limiting_current = max(node_current.values()) * limit
for go_term in self.GO2Num.iterkeys():
if node_current[go_term] > limiting_current:
char_dict[go_term] = [
node_current[go_term],
self.GO2_Pure_Inf[go_term],
len(self.GO2UP_Reachable_nodes[go_term])]
return char_dict
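        # Illustration (a sketch with made-up values): the returned dict maps
        # GO ids to [current, pure informativity, number of reachable UniProts],
        # e.g. {go_id: [0.42, 5.3, 17]} for a term carrying 0.42 units of
        # current, 5.3 bits of informativity and reached by 17 UniProts.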
def export_conduction_system(self):
"""
Computes the conduction system of the GO terms and exports it to the GDF format
and flushes it into a file that can be viewed with Gephi
:raise Warning:
"""
node_char_names = [
'Current',
'Type',
'Legacy_ID',
'Names',
'Pure_informativity',
'Confusion_potential']
node_char_types = [
'DOUBLE',
'VARCHAR',
'VARCHAR',
'VARCHAR',
'DOUBLE',
'DOUBLE']
char_dict = {}
if self.uncomplete_compute:
            log.warning('Links between the elements should not be trusted: the computation '
                        'used sampling and was not complete')
for GO in self.GO2Num.iterkeys():
char_dict[GO] = [str(self.node_current[GO]),
'GO', self.GO_Legacy_IDs[GO],
self.GO_Names[GO].replace(',', '-'),
str(self.GO2_Pure_Inf[GO]),
str(len(self.GO2UP_Reachable_nodes[GO]))]
for UP in self.analytic_uniprots:
char_dict[UP] = [str(self.node_current[UP]),
'UP', self.UP_Names[UP][0],
str(self.UP_Names[UP][1]).replace(',', '-'),
str(self.binding_intensity),
'1']
gdf_exporter = GdfExportInterface(
target_fname=Outputs.GO_GDF_output,
field_names=node_char_names,
field_types=node_char_types,
node_properties_dict=char_dict,
min_current=0.01,
index_2_label=self.inflated_idx2lbl,
label_2_index=self.inflated_lbl2idx,
current_matrix=self.current_accumulator)
gdf_exporter.write()
def export_subsystem(self, uniprot_system, uniprot_subsystem):
"""
        Exports the subsystem of reached_uniprots_neo4j_id_list and circulation between
        them based on a larger precalculated system. This is possible only if the memoization
        parameter was on during the execution of the "build_extended_conduction_system()"
        function.
:param uniprot_system: The set of uniprots for which the larger system was calculated
:param uniprot_subsystem: the set of reached_uniprots_neo4j_id_list we are interested in
:raise Exception: if the set of uniprots for which the larger system was calculated
doesn't correspond to what is stored in the dumps
"""
current_recombinator = self.undump_memoized()
if not set(uniprot_system) == set(
pickle.loads(current_recombinator['UPs'])):
raise Exception('Wrong UP system re-analyzed')
self.uniprots_2_voltage_and_circulation = pickle.loads(
current_recombinator['voltages'])
self.set_uniprot_source(uniprot_subsystem)
self.build_extended_conduction_system(memoized=False, sourced=True)
self.export_conduction_system()
def randomly_sample(
self,
samples_size,
samples_each_size,
sparse_rounds=False,
chromosome_specific=False,
memoized=False,
no_add=False):
"""
Randomly samples the set of reached_uniprots_neo4j_id_list used to create the model.
This is the null model creation routine
:param samples_size: list of numbers of uniprots we would like to create the model for
        :param samples_each_size: how many times we would like to sample each uniprot number
        :param sparse_rounds: if we want to use sparse sampling
        (useful in case of large uniprot sets),
        we would use this option
        :param chromosome_specific: if we want the sampling to be chromosome-specific,
        set this parameter to the
        number of the chromosome to sample from
        :param memoized: if set to True, the sampling will be remembered for export.
        Useful in case of the chromosome comparison
        :param no_add: if set to True, the result of sampling will not be added to the database
        of samples. Useful if re-running tests with similar parameters several times.
        :raise Exception: if the numbers of items in samples_size and samples_each_size
        are different
"""
if not len(samples_size) == len(samples_each_size):
raise Exception('Not the same list sizes!')
self_connectable_uniprots = list(
set(self.InitSet) - set(self.UPs_without_GO))
if chromosome_specific:
self_connectable_uniprots = list(set(self_connectable_uniprots).intersection(
set(self.interactome_interface_instance.chromosomes_2_uniprot[str(
chromosome_specific)])))
for sample_size, iterations in zip(samples_size, samples_each_size):
sample_size = min(sample_size, len(self_connectable_uniprots))
for i in range(0, iterations):
shuffle(self_connectable_uniprots)
analytics_up_list = self_connectable_uniprots[:sample_size]
self.set_uniprot_source(analytics_up_list)
self.build_extended_conduction_system(
memoized=memoized, sourced=False, sparse_samples=sparse_rounds)
md5 = hashlib.md5(
json.dumps(
sorted(analytics_up_list),
sort_keys=True)).hexdigest()
if not no_add:
annotome_rand_samp.insert(
{
'UP_hash': md5,
'sys_hash': self.md5_hash(),
'size': sample_size,
'chrom': str(chromosome_specific),
'sparse_rounds': sparse_rounds,
'UPs': pickle.dumps(analytics_up_list),
'currents': pickle.dumps(
(self.current_accumulator,
self.node_current)),
'voltages': pickle.dumps(
self.UP2UP_voltages)})
if not sparse_rounds:
log.info(
'Random ID: %s \t Sample size: %s \t iteration: %s\t compops: %s \t time: %s ',
self.random_tag, sample_size, i,
"{0:.2f}".format(sample_size * (sample_size - 1) / 2 / self._time()),
self.pretty_time())
else:
log.info('Random ID: %s \t Sample size: %s \t iteration: %s\t compops: %s \t '
'time: %s ', self.random_tag, sample_size, i,
"{0:.2f}".format(sample_size * sparse_rounds / 2 / self._time()),
self.pretty_time())
def get_independent_linear_groups(self):
"""
Recovers independent linear groups of the GO terms. Independent linear groups are
those that share a significant amount of reached_uniprots_neo4j_id_list in common
"""
self.Indep_Lapl = lil_matrix((len(self.All_GOs), len(self.All_GOs)))
for GO_list in self.UP2GO_Reachable_nodes.itervalues():
for GO1, GO2 in combinations(GO_list, 2):
idx1, idx2 = (self.GO2Num[GO1], self.GO2Num[GO2])
self.Indep_Lapl[idx1, idx2] += -1
self.Indep_Lapl[idx2, idx1] += -1
self.Indep_Lapl[idx2, idx2] += 1
self.Indep_Lapl[idx1, idx1] += 1
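        # Illustration (a sketch, not part of the original code): if one UniProt
        # reaches the GO terms {A, B, C}, the pairs (A, B), (A, C) and (B, C)
        # each decrement their off-diagonal entries and increment their
        # diagonals, accumulating a co-annotation Laplacian over all UniProts.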
if __name__ == '__main__':
# Creates an instance of MatrixGetter and loads pre-computed values
go_interface_instance = GeneOntologyInterface(uniprot_node_ids=get_background_bulbs_ids())
go_interface_instance.full_rebuild()
# loading takes 1-6 seconds.
# fill for reach only is done in 2 seconds,
    # stepping takes another 15,
# inverting + info computation - 1 more second
# Laplacian building =>
##
# full computation - 3 minutes 18 seconds; save 7 seconds, retrieval - 3
# seconds
# go_interface_instance.load()
# print go_interface_instance.pretty_time()
# go_interface_instance.get_indep_linear_groups()
# go_interface_instance.dump_Indep_Linset()
# go_interface_instance.randomly_sample([10, 25], [5]*2, chromosome_specific=15)
# go_interface_instance.set_Uniprot_source(experimental)
# go_interface_instance.build_extended_conduction_system(sparse_samples=10)
# go_interface_instance.export_conduction_system()
# go_interface_instance.export_subsystem(experimental, ['186958', '142401', '147798', '164077'])
# data_array = np.array([log(val) for val in go_interface_instance.GO2_Pure_Inf.itervalues()])
# hist(data_array, 100, log=True)
# show() | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/bower_components/web-animations-js/src/timing-utilities.js |
(function(shared, testing) {
var fills = 'backwards|forwards|both|none'.split('|');
var directions = 'reverse|alternate|alternate-reverse'.split('|');
var linear = function(x) { return x; };
function cloneTimingInput(timingInput) {
if (typeof timingInput == 'number') {
return timingInput;
}
var clone = {};
for (var m in timingInput) {
clone[m] = timingInput[m];
}
return clone;
}
function AnimationEffectTiming() {
this._delay = 0;
this._endDelay = 0;
this._fill = 'none';
this._iterationStart = 0;
this._iterations = 1;
this._duration = 0;
this._playbackRate = 1;
this._direction = 'normal';
this._easing = 'linear';
this._easingFunction = linear;
}
function isInvalidTimingDeprecated() {
return shared.isDeprecated('Invalid timing inputs', '2016-03-02', 'TypeError exceptions will be thrown instead.', true);
}
AnimationEffectTiming.prototype = {
_setMember: function(member, value) {
this['_' + member] = value;
if (this._effect) {
this._effect._timingInput[member] = value;
this._effect._timing = shared.normalizeTimingInput(this._effect._timingInput);
this._effect.activeDuration = shared.calculateActiveDuration(this._effect._timing);
if (this._effect._animation) {
this._effect._animation._rebuildUnderlyingAnimation();
}
}
},
get playbackRate() {
return this._playbackRate;
},
set delay(value) {
this._setMember('delay', value);
},
get delay() {
return this._delay;
},
set endDelay(value) {
this._setMember('endDelay', value);
},
get endDelay() {
return this._endDelay;
},
set fill(value) {
this._setMember('fill', value);
},
get fill() {
return this._fill;
},
set iterationStart(value) {
if ((isNaN(value) || value < 0) && isInvalidTimingDeprecated()) {
        throw new TypeError('iterationStart must be a non-negative number, received: ' + value);
}
this._setMember('iterationStart', value);
},
get iterationStart() {
return this._iterationStart;
},
set duration(value) {
if (value != 'auto' && (isNaN(value) || value < 0) && isInvalidTimingDeprecated()) {
throw new TypeError('duration must be non-negative or auto, received: ' + value);
}
this._setMember('duration', value);
},
get duration() {
return this._duration;
},
set direction(value) {
this._setMember('direction', value);
},
get direction() {
return this._direction;
},
set easing(value) {
this._easingFunction = parseEasingFunction(normalizeEasing(value));
this._setMember('easing', value);
},
get easing() {
return this._easing;
},
set iterations(value) {
if ((isNaN(value) || value < 0) && isInvalidTimingDeprecated()) {
throw new TypeError('iterations must be non-negative, received: ' + value);
}
this._setMember('iterations', value);
},
get iterations() {
return this._iterations;
}
};
function makeTiming(timingInput, forGroup, effect) {
var timing = new AnimationEffectTiming();
if (forGroup) {
timing.fill = 'both';
timing.duration = 'auto';
}
if (typeof timingInput == 'number' && !isNaN(timingInput)) {
timing.duration = timingInput;
} else if (timingInput !== undefined) {
Object.getOwnPropertyNames(timingInput).forEach(function(property) {
if (timingInput[property] != 'auto') {
if (typeof timing[property] == 'number' || property == 'duration') {
if (typeof timingInput[property] != 'number' || isNaN(timingInput[property])) {
return;
}
}
if ((property == 'fill') && (fills.indexOf(timingInput[property]) == -1)) {
return;
}
if ((property == 'direction') && (directions.indexOf(timingInput[property]) == -1)) {
return;
}
if (property == 'playbackRate' && timingInput[property] !== 1 && shared.isDeprecated('AnimationEffectTiming.playbackRate', '2014-11-28', 'Use Animation.playbackRate instead.')) {
return;
}
timing[property] = timingInput[property];
}
});
}
return timing;
}
function numericTimingToObject(timingInput) {
if (typeof timingInput == 'number') {
if (isNaN(timingInput)) {
timingInput = { duration: 0 };
} else {
timingInput = { duration: timingInput };
}
}
return timingInput;
}
function normalizeTimingInput(timingInput, forGroup) {
timingInput = shared.numericTimingToObject(timingInput);
return makeTiming(timingInput, forGroup);
}
function cubic(a, b, c, d) {
if (a < 0 || a > 1 || c < 0 || c > 1) {
return linear;
}
return function(x) {
if (x <= 0) {
var start_gradient = 0;
if (a > 0)
start_gradient = b / a;
else if (!b && c > 0)
start_gradient = d / c;
return start_gradient * x;
}
if (x >= 1) {
var end_gradient = 0;
if (c < 1)
end_gradient = (d - 1) / (c - 1);
else if (c == 1 && a < 1)
end_gradient = (b - 1) / (a - 1);
return 1 + end_gradient * (x - 1);
}
      var start = 0, end = 1;
      // hoisted out of the loop for clarity; behavior is unchanged
      function f(a, b, m) { return 3 * a * (1 - m) * (1 - m) * m + 3 * b * (1 - m) * m * m + m * m * m; }
      var mid = 0.5;
      while (start < end) {
        mid = (start + end) / 2;
        var xEst = f(a, c, mid);
        if (Math.abs(x - xEst) < 0.00001) {
          return f(b, d, mid);
        }
        if (xEst < x) {
          start = mid;
        } else {
          end = mid;
        }
      }
      return f(b, d, mid);
}
}
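  // Illustration (a sketch, not part of the original source): the CSS "ease"
  // curve is cubic(0.25, 0.1, 0.25, 1); evaluating it gives roughly
  //   ease(0) === 0, ease(0.5) ~ 0.8, ease(1) === 1,
  // showing how "ease" front-loads most of its progress.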
var Start = 1;
var Middle = 0.5;
var End = 0;
function step(count, pos) {
return function(x) {
if (x >= 1) {
return 1;
}
var stepSize = 1 / count;
x += pos * stepSize;
return x - x % stepSize;
}
}
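  // Illustration (a sketch): var s = step(4, End) quantizes progress into four
  // flat treads; with End === 0 there is no phase offset, so
  //   s(0.1) === 0, s(0.3) === 0.25, s(0.99) === 0.75, s(1) === 1.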
var presets = {
'ease': cubic(0.25, 0.1, 0.25, 1),
'ease-in': cubic(0.42, 0, 1, 1),
'ease-out': cubic(0, 0, 0.58, 1),
'ease-in-out': cubic(0.42, 0, 0.58, 1),
'step-start': step(1, Start),
'step-middle': step(1, Middle),
'step-end': step(1, End)
};
var styleForCleaning = null;
var numberString = '\\s*(-?\\d+\\.?\\d*|-?\\.\\d+)\\s*';
var cubicBezierRe = new RegExp('cubic-bezier\\(' + numberString + ',' + numberString + ',' + numberString + ',' + numberString + '\\)');
var stepRe = /steps\(\s*(\d+)\s*,\s*(start|middle|end)\s*\)/;
function normalizeEasing(easing) {
if (!styleForCleaning) {
styleForCleaning = document.createElement('div').style;
}
styleForCleaning.animationTimingFunction = '';
styleForCleaning.animationTimingFunction = easing;
var normalizedEasing = styleForCleaning.animationTimingFunction;
if (normalizedEasing == '' && isInvalidTimingDeprecated()) {
throw new TypeError(easing + ' is not a valid value for easing');
}
return normalizedEasing;
}
function parseEasingFunction(normalizedEasing) {
if (normalizedEasing == 'linear') {
return linear;
}
var cubicData = cubicBezierRe.exec(normalizedEasing);
if (cubicData) {
return cubic.apply(this, cubicData.slice(1).map(Number));
}
var stepData = stepRe.exec(normalizedEasing);
if (stepData) {
return step(Number(stepData[1]), {'start': Start, 'middle': Middle, 'end': End}[stepData[2]]);
}
var preset = presets[normalizedEasing];
if (preset) {
return preset;
}
// At this point none of our parse attempts succeeded; the easing is invalid.
// Fall back to linear in the interest of not crashing the page.
return linear;
}
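  // Illustration (a sketch): parseEasingFunction('cubic-bezier(0.42, 0, 1, 1)')
  // yields the same curve as presets['ease-in'], while an unrecognized string
  // falls back to linear rather than throwing.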
function calculateActiveDuration(timing) {
return Math.abs(repeatedDuration(timing) / timing.playbackRate);
}
function repeatedDuration(timing) {
// https://w3c.github.io/web-animations/#calculating-the-active-duration
if (timing.duration === 0 || timing.iterations === 0) {
return 0;
}
return timing.duration * timing.iterations;
}
var PhaseNone = 0;
var PhaseBefore = 1;
var PhaseAfter = 2;
var PhaseActive = 3;
function calculatePhase(activeDuration, localTime, timing) {
// https://w3c.github.io/web-animations/#animation-effect-phases-and-states
if (localTime == null) {
return PhaseNone;
}
var endTime = timing.delay + activeDuration + timing.endDelay;
if (localTime < Math.min(timing.delay, endTime)) {
return PhaseBefore;
}
if (localTime >= Math.min(timing.delay + activeDuration, endTime)) {
return PhaseAfter;
}
return PhaseActive;
}
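  // Illustration (a sketch): with timing.delay = 100, activeDuration = 1000 and
  // timing.endDelay = 0, the end time is 1100, so local times 50, 500 and 1200
  // map to PhaseBefore, PhaseActive and PhaseAfter respectively.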
function calculateActiveTime(activeDuration, fillMode, localTime, phase, delay) {
// https://w3c.github.io/web-animations/#calculating-the-active-time
switch (phase) {
case PhaseBefore:
if (fillMode == 'backwards' || fillMode == 'both')
return 0;
return null;
case PhaseActive:
return localTime - delay;
case PhaseAfter:
if (fillMode == 'forwards' || fillMode == 'both')
return activeDuration;
return null;
case PhaseNone:
return null;
}
}
function calculateOverallProgress(iterationDuration, phase, iterations, activeTime, iterationStart) {
// https://w3c.github.io/web-animations/#calculating-the-overall-progress
var overallProgress = iterationStart;
if (iterationDuration === 0) {
if (phase !== PhaseBefore) {
overallProgress += iterations;
}
} else {
overallProgress += activeTime / iterationDuration;
}
return overallProgress;
}
function calculateSimpleIterationProgress(overallProgress, iterationStart, phase, iterations, activeTime, iterationDuration) {
// https://w3c.github.io/web-animations/#calculating-the-simple-iteration-progress
var simpleIterationProgress = (overallProgress === Infinity) ? iterationStart % 1 : overallProgress % 1;
if (simpleIterationProgress === 0 && phase === PhaseAfter && iterations !== 0 &&
(activeTime !== 0 || iterationDuration === 0)) {
simpleIterationProgress = 1;
}
return simpleIterationProgress;
}
function calculateCurrentIteration(phase, iterations, simpleIterationProgress, overallProgress) {
// https://w3c.github.io/web-animations/#calculating-the-current-iteration
if (phase === PhaseAfter && iterations === Infinity) {
return Infinity;
}
if (simpleIterationProgress === 1) {
return Math.floor(overallProgress) - 1;
}
return Math.floor(overallProgress);
}
function calculateDirectedProgress(playbackDirection, currentIteration, simpleIterationProgress) {
// https://w3c.github.io/web-animations/#calculating-the-directed-progress
var currentDirection = playbackDirection;
if (playbackDirection !== 'normal' && playbackDirection !== 'reverse') {
var d = currentIteration;
if (playbackDirection === 'alternate-reverse') {
d += 1;
}
currentDirection = 'normal';
if (d !== Infinity && d % 2 !== 0) {
currentDirection = 'reverse';
}
}
if (currentDirection === 'normal') {
return simpleIterationProgress;
}
return 1 - simpleIterationProgress;
}
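  // Illustration (a sketch): with playbackDirection 'alternate', iteration 0
  // plays forward (progress p stays p) while iteration 1 plays backward
  // (p becomes 1 - p); 'alternate-reverse' shifts the parity by one.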
function calculateIterationProgress(activeDuration, localTime, timing) {
var phase = calculatePhase(activeDuration, localTime, timing);
var activeTime = calculateActiveTime(activeDuration, timing.fill, localTime, phase, timing.delay);
if (activeTime === null)
return null;
var overallProgress = calculateOverallProgress(timing.duration, phase, timing.iterations, activeTime, timing.iterationStart);
var simpleIterationProgress = calculateSimpleIterationProgress(overallProgress, timing.iterationStart, phase, timing.iterations, activeTime, timing.duration);
var currentIteration = calculateCurrentIteration(phase, timing.iterations, simpleIterationProgress, overallProgress);
var directedProgress = calculateDirectedProgress(timing.direction, currentIteration, simpleIterationProgress);
// https://w3c.github.io/web-animations/#calculating-the-transformed-progress
// https://w3c.github.io/web-animations/#calculating-the-iteration-progress
return timing._easingFunction(directedProgress);
}
shared.cloneTimingInput = cloneTimingInput;
shared.makeTiming = makeTiming;
shared.numericTimingToObject = numericTimingToObject;
shared.normalizeTimingInput = normalizeTimingInput;
shared.calculateActiveDuration = calculateActiveDuration;
shared.calculateIterationProgress = calculateIterationProgress;
shared.calculatePhase = calculatePhase;
shared.normalizeEasing = normalizeEasing;
shared.parseEasingFunction = parseEasingFunction;
if (WEB_ANIMATIONS_TESTING) {
testing.normalizeTimingInput = normalizeTimingInput;
testing.normalizeEasing = normalizeEasing;
testing.parseEasingFunction = parseEasingFunction;
testing.calculateActiveDuration = calculateActiveDuration;
testing.calculatePhase = calculatePhase;
testing.PhaseNone = PhaseNone;
testing.PhaseBefore = PhaseBefore;
testing.PhaseActive = PhaseActive;
testing.PhaseAfter = PhaseAfter;
}
})(webAnimationsShared, webAnimationsTesting); | PypiClean |
/CsuPTMD-1.0.12.tar.gz/CsuPTMD-1.0.12/PTMD/maskrcnn_benchmark/apex/apex/fp16_utils/fp16_optimizer.py | import torch
from torch import nn
from torch.autograd import Variable
from torch.nn.parameter import Parameter
from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors
from ..amp._amp_state import _amp_state, maybe_print
from ..amp.scaler import LossScaler
from ..multi_tensor_apply import multi_tensor_applier
from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm
# TODO: Update overflow check + downscale to use Carl's fused kernel.
class FP16_Optimizer(object):
def __init__(self,
init_optimizer,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=True):
print("Warning: FP16_Optimizer is deprecated and dangerous, and will be deleted soon. "
"If it still works, you're probably getting lucky. "
"For mixed precision, use the documented API https://nvidia.github.io/apex/amp.html, with opt_level=O1.")
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.verbose = verbose
self.optimizer = init_optimizer
# init_state_dict sets up an alternative way to cast per-param state tensors.
# Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary.
# init_state_dict = init_optimizer.state_dict()
self.fp16_groups = []
self.fp32_from_fp16_groups = []
self.fp32_from_fp32_groups = []
for i, param_group in enumerate(self.optimizer.param_groups):
self.maybe_print("FP16_Optimizer processing param group {}:".format(i))
fp16_params_this_group = []
fp32_params_this_group = []
fp32_from_fp16_params_this_group = []
            # distinct index name so the group index i above is not shadowed
            for j, param in enumerate(param_group['params']):
                if param.requires_grad:
                    if param.type() == 'torch.cuda.HalfTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}"
                                         .format(param.size()))
                        fp16_params_this_group.append(param)
                        master_param = param.detach().clone().float()
                        master_param.requires_grad = True
                        param_group['params'][j] = master_param
                        fp32_from_fp16_params_this_group.append(master_param)
                        # Reset existing state dict key to the new master param.
                        # We still need to recast per-param state tensors, if any, to FP32.
                        if param in self.optimizer.state:
                            self.optimizer.state[master_param] = self.optimizer.state.pop(param)
                    elif param.type() == 'torch.cuda.FloatTensor':
                        self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}"
                                         .format(param.size()))
                        fp32_params_this_group.append(param)
                        param_group['params'][j] = param
else:
raise TypeError("Wrapped parameters must be either "
"torch.cuda.FloatTensor or torch.cuda.HalfTensor. "
"Received {}".format(param.type()))
self.fp16_groups.append(fp16_params_this_group)
self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group)
self.fp32_from_fp32_groups.append(fp32_params_this_group)
self.all_fp16_params = []
for group in self.fp16_groups:
self.all_fp16_params += group
self.all_fp32_from_fp16_params = []
for group in self.fp32_from_fp16_groups:
self.all_fp32_from_fp16_params += group
self.all_fp32_from_fp32_params = []
for group in self.fp32_from_fp32_groups:
self.all_fp32_from_fp32_params += group
# Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors
self.optimizer.load_state_dict(self.optimizer.state_dict())
# alternative way to cast per-param state tensors:
# self.optimizer.load_state_dict(init_state_dict)
if dynamic_loss_scale:
self.dynamic_loss_scale = True
if dynamic_loss_args is not None:
self.loss_scaler = LossScaler("dynamic", **dynamic_loss_args)
else:
self.loss_scaler = LossScaler("dynamic")
else:
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(static_loss_scale)
self.overflow = False
self.first_closure_call_this_step = True
self.clip_grad_norm = clip_grad_norm
# TODO: Centralize exposure and import error checking for the C backend.
if multi_tensor_applier.available:
import amp_C
self.multi_tensor_scale = amp_C.multi_tensor_scale
            self._dummy_overflow_buf = torch.cuda.IntTensor([0])
# Having self.maybe_print distinct from _amp_state.maybe_print is another artifact
# of having to support FP16_Optimizer separately, for the time being.
def maybe_print(self, msg):
if self.verbose:
print(msg)
def __getstate__(self):
raise RuntimeError("FP16_Optimizer should be serialized using state_dict().")
def __setstate__(self, state):
raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().")
def zero_grad(self, set_grads_to_None=False):
"""
Zero fp32 and fp16 parameter grads.
"""
# In principle, only the .grad attributes of the model params need to be zeroed,
# because gradients are copied into the FP32 master params. However, we zero
# all gradients owned by the optimizer, just to be safe:
for group in self.optimizer.param_groups:
for p in group['params']:
if set_grads_to_None:
p.grad = None
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
# Zero fp16 gradients owned by the model:
for fp16_group in self.fp16_groups:
for param in fp16_group:
if set_grads_to_None:
param.grad = None
else:
if param.grad is not None:
param.grad.detach_() # as in torch.optim.optimizer.zero_grad()
param.grad.zero_()
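        # Illustration (a sketch of the intended training step, with a
        # hypothetical model and criterion):
        #   optimizer.zero_grad()
        #   loss = criterion(model(input), target)
        #   optimizer.backward(loss)   # replaces loss.backward()
        #   optimizer.step()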
# Should not be used anymore.
# def _check_overflow(self):
# params = []
# for group in self.fp16_groups:
# for param in group:
# params.append(param)
# for group in self.fp32_from_fp32_groups:
# for param in group:
# params.append(param)
# self.overflow = self.loss_scaler.has_overflow(params)
# def _update_scale(self, has_overflow=False):
# self.loss_scaler.update_scale(has_overflow)
def _master_params_to_model_params(self):
if multi_tensor_applier.available:
if len(self.all_fp16_params) > 0:
multi_tensor_applier(
self.multi_tensor_scale,
self._dummy_overflow_buf,
[self.all_fp32_from_fp16_params, self.all_fp16_params],
1.0)
else:
for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
master_params_to_model_params(fp16_group, fp32_from_fp16_group)
# To consider: Integrate distributed with this wrapper by registering a hook on each variable
# that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream.
# def _model_grads_to_master_grads(self):
# for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups):
# model_grads_to_master_grads(fp16_group, fp32_from_fp16_group)
# def _downscale_master(self):
# if self.loss_scale != 1.0:
# for group in self.optimizer.param_groups:
# for param in group['params']:
# if param.grad is not None:
# param.grad.data.mul_(1./self.loss_scale)
def clip_master_grads(self, max_norm, norm_type=2):
"""
Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``.
Args:
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the current fp32 gradients (viewed as a single vector).
.. warning::
Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``).
"""
if not self.overflow:
fp32_params = []
for param_group in self.optimizer.param_groups:
for param in param_group['params']:
fp32_params.append(param)
return self.clip_grad_norm(fp32_params, max_norm, norm_type)
else:
return -1
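        # Illustration (a sketch): clip after backward and before step;
        #   optimizer.backward(loss)
        #   grad_norm = optimizer.clip_master_grads(max_norm=1.0)
        #   if grad_norm != -1:  # -1 signals an fp16 gradient overflow
        #       optimizer.step()  # step() also skips on its own when overflowed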
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step
state_dict['optimizer_state_dict'] = self.optimizer.state_dict()
state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups
return state_dict
def load_state_dict(self, state_dict):
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict['loss_scaler']
self.dynamic_loss_scale = state_dict['dynamic_loss_scale']
self.overflow = state_dict['overflow']
self.first_closure_call_this_step = state_dict['first_closure_call_this_step']
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 2.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']):
for current, saved in zip(current_group, saved_group):
current.data.copy_(saved.data)
def step(self, closure=None): # could add clip option.
"""
If no closure is supplied, :attr:`step` should be called after
``fp16_optimizer_obj.backward(loss)``.
:attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to
:class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params
originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run
another forward pass using their model.
If a closure is supplied, :attr:`step` may be called without a prior call to
:attr:`backward(loss)`.
This control flow is identical to `ordinary Pytorch optimizer use`_ with closures.
However, the user should take care that any ``loss.backward()`` call within the closure
has been replaced by ``fp16_optimizer_obj.backward(loss)``.
Args:
closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss.
Example with closure::
# optimizer is assumed to be an FP16_Optimizer object, previously constructed from an
# existing pytorch optimizer.
for input, target in dataset:
def closure():
optimizer.zero_grad()
output = model(input)
loss = loss_fn(output, target)
# loss.backward() becomes:
optimizer.backward(loss)
return loss
optimizer.step(closure)
.. warning::
Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling.
.. _`ordinary Pytorch optimizer use`:
http://pytorch.org/docs/master/optim.html#optimizer-step-closure
"""
scale = self.loss_scaler.loss_scale()
# To consider: Should this be in step(), or update_master_grads? It works either way,
# but I should make it consistent with the Amp control flow, which updates the scale
# during backward context manager exit.
# self._update_scale(self.overflow)
if self.overflow:
# Using _amp_state.maybe_print instead of self.print here is intentional.
maybe_print("Gradient overflow. Skipping step, reducing " +
"loss scale to {}".format(self.loss_scaler.loss_scale()))
return
if closure is not None:
retval = self._step_with_closure(closure)
else:
# torch.cuda.nvtx.range_push("pytorch optimizer step")
retval = self.optimizer.step()
# torch.cuda.nvtx.range_pop()
self._master_params_to_model_params()
return retval
def _step_with_closure(self, closure):
def wrapped_closure():
# helpful for debugging
# print("Calling wrapped_closure, first_closure_call_this_step = {}"
# .format(self.first_closure_call_this_step))
if self.first_closure_call_this_step:
# We expect that the fp16 params are initially fresh on entering self.step(),
# so _master_params_to_model_params() is unnecessary the first time wrapped_closure()
# is called within self.optimizer.step().
self.first_closure_call_this_step = False
else:
# If self.optimizer.step() internally calls wrapped_closure more than once,
# it may update the fp32 params after each call. However, self.optimizer
# doesn't know about the fp16 params at all. If the fp32 params get updated,
# we can't rely on self.optimizer to refresh the fp16 params. We need
# to handle that manually:
self._master_params_to_model_params()
# Our API expects the user to give us ownership of the backward() call by
# replacing all calls to loss.backward() with optimizer.backward(loss).
# This requirement holds whether or not the call to backward() is made within a closure.
# If the user is properly calling optimizer.backward(loss) within "closure,"
# calling closure() here will give the fp32 master params fresh gradients
# for the optimizer to play with, so all wrapped_closure needs to do is call
# closure() and return the loss.
temp_loss = closure()
while(self.overflow):
scale = self.loss_scaler.loss_scale()
# self._update_scale(self.overflow) # now done at the end of backward
print("OVERFLOW within closure! Skipping step, reducing loss scale to {}".format(
self.loss_scaler.loss_scale()))
temp_loss = closure()
return temp_loss
retval = self.optimizer.step(wrapped_closure)
self.first_closure_call_this_step = True
return retval
def backward(self, loss, update_master_grads=True, retain_graph=False):
"""
:attr:`backward` performs the following conceptual steps:
1. fp32_loss = loss.float() (see first Note below)
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined).
4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32.
5. Finally, master grads are divided by loss_scale.
In this way, after :attr:`backward`, the master params have fresh gradients,
and :attr:`step` may be called.
.. note::
:attr:`backward` internally converts the loss to fp32 before applying the loss scale.
This provides some additional safety against overflow if the user has supplied an
fp16 loss value.
However, for maximum overflow safety, the user should
compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to
:attr:`backward`.
.. warning::
The gradients found in a model's leaves after the call to
:attr:`backward` should not be regarded as valid in general,
because it's possible
they have been scaled (and in the case of dynamic loss scaling,
the scale factor may change over time).
If the user wants to inspect gradients after a call to :attr:`backward`,
only the master gradients should be regarded as valid. These can be retrieved via
:attr:`inspect_master_grad_data()`.
Args:
loss: The loss output by the user's model. loss may be either float or half (but see first Note above).
update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`.
retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below).
Example::
# Ordinary operation:
optimizer.backward(loss)
# Naive operation with multiple losses (technically valid, but less efficient):
# fp32 grads will be correct after the second call, but
# the first call incurs an unnecessary fp16->fp32 grad copy.
optimizer.backward(loss1)
optimizer.backward(loss2)
# More efficient way to handle multiple losses:
# The fp16->fp32 grad copy is delayed until fp16 grads from all
# losses have been accumulated.
optimizer.backward(loss1, update_master_grads=False)
optimizer.backward(loss2, update_master_grads=False)
optimizer.update_master_grads()
"""
        # To consider: try multiple backward passes using retain_graph=True to find
        # a loss scale that works. After you find a loss scale that works, do a final dummy
        # backward pass with retain_graph=False to tear down the graph. Doing this would avoid
        # discarding the iteration, but probably wouldn't improve overall efficiency.
scaled_loss = loss.float()*self.loss_scaler.loss_scale()
scaled_loss.backward(retain_graph=retain_graph)
if update_master_grads:
self.update_master_grads()
def update_master_grads(self):
# torch.cuda.nvtx.range_push("update_master_grads")
"""
Copy the ``.grad`` attribute from stored references to fp16 parameters to
the ``.grad`` attribute of the fp32 master parameters that are directly
updated by the optimizer. :attr:`update_master_grads` only needs to be called if
``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``.
"""
# if self.dynamic_loss_scale:
# self._check_overflow()
# if self.overflow: return
# self._model_grads_to_master_grads()
# self._downscale_master()
# Use the one-shot multi-tensor apply kernel
self.loss_scaler.clear_overflow_state()
if len(self.all_fp16_params) > 0:
# print("Model grads before")
# print([param.grad.data for param in self.all_fp16_params])
# I'm ONLY writing this as an incremental way to make some tests pass until
# I can refactor the tests as well.
# FP16_Optimizer should not be used by anyone.
model_grads = []
master_grads = []
for model_param, master_param in zip(self.all_fp16_params,
self.all_fp32_from_fp16_params):
if model_param.grad is not None:
model_grads.append(model_param.grad)
if master_param.grad is None:
master_param.grad = torch.empty_like(master_param)
master_grads.append(master_param.grad)
self.loss_scaler.unscale(
model_grads,
master_grads,
self.loss_scaler.loss_scale())
# print("Master grads after")
# print([param.grad.data for param in self.all_fp32_from_fp16_params])
if len(self.all_fp32_from_fp32_params) > 0:
model_grads = []
master_grads = []
for model_param, master_param in zip(self.all_fp32_from_fp32_params,
self.all_fp32_from_fp32_params):
if model_param.grad is not None:
model_grads.append(model_param.grad)
master_grads.append(master_param.grad)
# print("Model grads before")
# print([param.grad.data for param in self.all_fp32_from_fp32_params])
self.loss_scaler.unscale(
model_grads,
master_grads,
self.loss_scaler.loss_scale())
# print("Master grads after")
# print([param.grad.data for param in self.all_fp32_from_fp32_params])
# quit()
self.overflow = self.loss_scaler.update_scale()
# torch.cuda.nvtx.range_pop()
def inspect_master_grad_data(self):
"""
When running with :class:`FP16_Optimizer`,
``.grad`` attributes of a model's fp16 leaves should not be
regarded as truthful, because they might be scaled.
After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered,
the fp32 master params' ``.grad``
attributes will contain valid gradients properly divided by the loss scale. However,
because :class:`FP16_Optimizer` flattens some parameters, accessing them may be
nonintuitive. :attr:`inspect_master_grad_data`
allows those gradients to be viewed with shapes corresponding to their associated model leaves.
Returns:
List of lists (one list for each parameter group). The list for each parameter group
is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group.
"""
if self.overflow:
print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. "
"Gradients are currently invalid (may be inf, nan, or stale). Returning None.")
return None
else:
# The optimizer owns only references to master params.
master_grads_data = []
for param_group in self.optimizer.param_groups:
master_grads_this_group = []
for param in param_group['params']:
if param.grad is not None:
master_grads_this_group.append(param.grad.data)
else:
master_grads_this_group.append(None)
master_grads_data.append(master_grads_this_group)
return master_grads_data
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale()
def _set_loss_scale(self, value):
self.loss_scaler._loss_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups) | PypiClean |
/Caroline-presentation-0.2.4.tar.gz/Caroline-presentation-0.2.4/caroline/html_dist/js/mathjax/input/tex/extensions/mhchem.js | !function(n){var a={};function o(t){if(a[t])return a[t].exports;var e=a[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,o),e.l=!0,e.exports}o.m=n,o.c=a,o.d=function(t,e,n){o.o(t,e)||Object.defineProperty(t,e,{enumerable:!0,get:n})},o.r=function(t){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})},o.t=function(e,t){if(1&t&&(e=o(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var n=Object.create(null);if(o.r(n),Object.defineProperty(n,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var a in e)o.d(n,a,function(t){return e[t]}.bind(null,a));return n},o.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return o.d(e,"a",e),e},o.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},o.p="",o(o.s=0)}([function(t,e,n){"use strict";n(1)},function(t,e,n){"use strict";var a=n(2),o=function(t){{if(t&&t.__esModule)return t;var e={};if(null!=t)for(var n in t)Object.prototype.hasOwnProperty.call(t,n)&&(e[n]=t[n]);return e.default=t,e}}(n(3));(0,a.combineWithMathJax)({_:{input:{tex:{mhchem:{MhchemConfiguration:o}}}}})},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.combineConfig=MathJax._.components.global.combineConfig,e.combineDefaults=MathJax._.components.global.combineDefaults,e.combineWithMathJax=MathJax._.components.global.combineWithMathJax,e.MathJax=MathJax._.components.global.MathJax},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var a=n(4),o=n(5),i=n(6),r=n(7),c=n(8),u=n(9),p={};p.Macro=r.default.Macro,p.xArrow=c.default.xArrow,p.Machine=function(t,e,n){try{var a=t.GetArgument(e),o=u.mhchemParser.go(a,n),r=u.texify.go(o);t.string=r+t.string.substr(t.i),t.i=0}catch(t){throw new i.default(t[0],t[1],t.slice(2))}},new o.CommandMap("mhchem",{ce:["Machine","ce"],pu:["Machine","pu"],longrightleftharpoons:["Macro","\\stackrel{\\textstyle{-}\\!\\!{\\rightharpoonup}}{\\smash{{\\leftharpoondown}\\!\\!{-}}}"],longRightleftharpoons:["Macro","\\stackrel{\\textstyle{-}\\!\\!{\\rightharpoonup}}{\\smash{\\leftharpoondown}}"],longLeftrightharpoons:["Macro","\\stackrel{\\textstyle\\vphantom{{-}}{\\rightharpoonup}}{\\smash{{\\leftharpoondown}\\!\\!{-}}}"],longleftrightarrows:["Macro","\\stackrel{\\longrightarrow}{\\smash{\\longleftarrow}\\Rule{0px}{.25em}{0px}}"],tripledash:["Macro","\\vphantom{-}\\raise2mu{\\kern2mu\\tiny\\text{-}\\kern1mu\\text{-}\\kern1mu\\text{-}\\kern2mu}"],xrightarrow:["xArrow",8594,5,6],xleftarrow:["xArrow",8592,7,3],xleftrightarrow:["xArrow",8596,6,6],xrightleftharpoons:["xArrow",8652,5,7],xRightleftharpoons:["xArrow",8652,5,7],xLeftrightharpoons:["xArrow",8652,5,7]},p),e.MhchemConfiguration=a.Configuration.create("mhchem",{handler:{macro:["mhchem"]}})},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.Configuration=MathJax._.input.tex.Configuration.Configuration,e.ConfigurationHandler=MathJax._.input.tex.Configuration.ConfigurationHandler},function(t,e,n){"use 
strict";Object.defineProperty(e,"__esModule",{value:!0}),e.AbstractSymbolMap=MathJax._.input.tex.SymbolMap.AbstractSymbolMap,e.RegExpMap=MathJax._.input.tex.SymbolMap.RegExpMap,e.AbstractParseMap=MathJax._.input.tex.SymbolMap.AbstractParseMap,e.CharacterMap=MathJax._.input.tex.SymbolMap.CharacterMap,e.DelimiterMap=MathJax._.input.tex.SymbolMap.DelimiterMap,e.MacroMap=MathJax._.input.tex.SymbolMap.MacroMap,e.CommandMap=MathJax._.input.tex.SymbolMap.CommandMap,e.EnvironmentMap=MathJax._.input.tex.SymbolMap.EnvironmentMap},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.TexError.default},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.base.BaseMethods.default},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),e.default=MathJax._.input.tex.ams.AmsMethods.default},function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var m={go:function(t,e){if(!t)return[];void 0===e&&(e="ce");var n,a="0",o={};o.parenthesisLevel=0,t=(t=(t=t.replace(/\n/g," ")).replace(/[\u2212\u2013\u2014\u2010]/g,"-")).replace(/[\u2026]/g,"...");for(var r=10,i=[];;){n!==t?(r=10,n=t):r--;var c=m.stateMachines[e],u=c.transitions[a]||c.transitions["*"];t:for(var p=0;p<u.length;p++){var s=m.patterns.match_(u[p].pattern,t);if(s){for(var _=u[p].task,l=0;l<_.action_.length;l++){var d;if(c.actions[_.action_[l].type_])d=c.actions[_.action_[l].type_](o,s.match_,_.action_[l].option);else{if(!m.actions[_.action_[l].type_])throw["MhchemBugA","mhchem bug A. Please report. ("+_.action_[l].type_+")"];d=m.actions[_.action_[l].type_](o,s.match_,_.action_[l].option)}m.concatArray(i,d)}if(a=_.nextState||a,!(0<t.length))return i;if(_.revisit||(t=s.remainder),!_.toContinue)break t}}if(r<=0)throw["MhchemBugU","mhchem bug U. 
Please report."]}},concatArray:function(t,e){if(e)if(Array.isArray(e))for(var n=0;n<e.length;n++)t.push(e[n]);else t.push(e)},patterns:{patterns:{empty:/^$/,else:/^./,else2:/^./,space:/^\s/,"space A":/^\s(?=[A-Z\\$])/,space$:/^\s$/,"a-z":/^[a-z]/,x:/^x/,x$:/^x$/,i$:/^i$/,letters:/^(?:[a-zA-Z\u03B1-\u03C9\u0391-\u03A9?@]|(?:\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega|Gamma|Delta|Theta|Lambda|Xi|Pi|Sigma|Upsilon|Phi|Psi|Omega)(?:\s+|\{\}|(?![a-zA-Z]))))+/,"\\greek":/^\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega|Gamma|Delta|Theta|Lambda|Xi|Pi|Sigma|Upsilon|Phi|Psi|Omega)(?:\s+|\{\}|(?![a-zA-Z]))/,"one lowercase latin letter $":/^(?:([a-z])(?:$|[^a-zA-Z]))$/,"$one lowercase latin letter$ $":/^\$(?:([a-z])(?:$|[^a-zA-Z]))\$$/,"one lowercase greek letter $":/^(?:\$?[\u03B1-\u03C9]\$?|\$?\\(?:alpha|beta|gamma|delta|epsilon|zeta|eta|theta|iota|kappa|lambda|mu|nu|xi|omicron|pi|rho|sigma|tau|upsilon|phi|chi|psi|omega)\s*\$?)(?:\s+|\{\}|(?![a-zA-Z]))$/,digits:/^[0-9]+/,"-9.,9":/^[+\-]?(?:[0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))/,"-9.,9 no missing 0":/^[+\-]?[0-9]+(?:[.,][0-9]+)?/,"(-)(9.,9)(e)(99)":function(t){var e=t.match(/^(\+\-|\+\/\-|\+|\-|\\pm\s?)?([0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))?(\((?:[0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+))\))?(?:([eE]|\s*(\*|x|\\times|\u00D7)\s*10\^)([+\-]?[0-9]+|\{[+\-]?[0-9]+\}))?/);return e&&e[0]?{match_:e.splice(1),remainder:t.substr(e[0].length)}:null},"(-)(9)^(-9)":function(t){var e=t.match(/^(\+\-|\+\/\-|\+|\-|\\pm\s?)?([0-9]+(?:[,.][0-9]+)?|[0-9]*(?:\.[0-9]+)?)\^([+\-]?[0-9]+|\{[+\-]?[0-9]+\})/);return e&&e[0]?{match_:e.splice(1),remainder:t.substr(e[0].length)}:null},"state of aggregation $":function(t){var e=m.patterns.findObserveGroups(t,"",/^\([a-z]{1,3}(?=[\),])/,")","");if(e&&e.remainder.match(/^($|[\s,;\)\]\}])/))return e;var n=t.match(/^(?:\((?:\\ca\s?)?\$[amothc]\$\))/);return n?{match_:n[0],remainder:t.substr(n[0].length)}:null},"_{(state of aggregation)}$":/^_\{(\([a-z]{1,3}\))\}/,"{[(":/^(?:\\\{|\[|\()/,")]}":/^(?:\)|\]|\\\})/,", ":/^[,;]\s*/,",":/^[,;]/,".":/^[.]/,". 
":/^([.\u22C5\u00B7\u2022])\s*/,"...":/^\.\.\.(?=$|[^.])/,"* ":/^([*])\s*/,"^{(...)}":function(t){return m.patterns.findObserveGroups(t,"^{","","","}")},"^($...$)":function(t){return m.patterns.findObserveGroups(t,"^","$","$","")},"^a":/^\^([0-9]+|[^\\_])/,"^\\x{}{}":function(t){return m.patterns.findObserveGroups(t,"^",/^\\[a-zA-Z]+\{/,"}","","","{","}","",!0)},"^\\x{}":function(t){return m.patterns.findObserveGroups(t,"^",/^\\[a-zA-Z]+\{/,"}","")},"^\\x":/^\^(\\[a-zA-Z]+)\s*/,"^(-1)":/^\^(-?\d+)/,"'":/^'/,"_{(...)}":function(t){return m.patterns.findObserveGroups(t,"_{","","","}")},"_($...$)":function(t){return m.patterns.findObserveGroups(t,"_","$","$","")},_9:/^_([+\-]?[0-9]+|[^\\])/,"_\\x{}{}":function(t){return m.patterns.findObserveGroups(t,"_",/^\\[a-zA-Z]+\{/,"}","","","{","}","",!0)},"_\\x{}":function(t){return m.patterns.findObserveGroups(t,"_",/^\\[a-zA-Z]+\{/,"}","")},"_\\x":/^_(\\[a-zA-Z]+)\s*/,"^_":/^(?:\^(?=_)|\_(?=\^)|[\^_]$)/,"{}":/^\{\}/,"{...}":function(t){return m.patterns.findObserveGroups(t,"","{","}","")},"{(...)}":function(t){return m.patterns.findObserveGroups(t,"{","","","}")},"$...$":function(t){return m.patterns.findObserveGroups(t,"","$","$","")},"${(...)}$":function(t){return m.patterns.findObserveGroups(t,"${","","","}$")},"$(...)$":function(t){return m.patterns.findObserveGroups(t,"$","","","$")},"=<>":/^[=<>]/,"#":/^[#\u2261]/,"+":/^\+/,"-$":/^-(?=[\s_},;\]/]|$|\([a-z]+\))/,"-9":/^-(?=[0-9])/,"- orbital overlap":/^-(?=(?:[spd]|sp)(?:$|[\s,;\)\]\}]))/,"-":/^-/,"pm-operator":/^(?:\\pm|\$\\pm\$|\+-|\+\/-)/,operator:/^(?:\+|(?:[\-=<>]|<<|>>|\\approx|\$\\approx\$)(?=\s|$|-?[0-9]))/,arrowUpDown:/^(?:v|\(v\)|\^|\(\^\))(?=$|[\s,;\)\]\}])/,"\\bond{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\bond{","","","}")},"->":/^(?:<->|<-->|->|<-|<=>>|<<=>|<=>|[\u2192\u27F6\u21CC])/,CMT:/^[CMT](?=\[)/,"[(...)]":function(t){return m.patterns.findObserveGroups(t,"[","","","]")},"1st-level escape":/^(&|\\\\|\\hline)\s*/,"\\,":/^(?:\\[,\ ;:])/,"\\x{}{}":function(t){return m.patterns.findObserveGroups(t,"",/^\\[a-zA-Z]+\{/,"}","","","{","}","",!0)},"\\x{}":function(t){return m.patterns.findObserveGroups(t,"",/^\\[a-zA-Z]+\{/,"}","")},"\\ca":/^\\ca(?:\s+|(?![a-zA-Z]))/,"\\x":/^(?:\\[a-zA-Z]+\s*|\\[_&{}%])/,orbital:/^(?:[0-9]{1,2}[spdfgh]|[0-9]{0,2}sp)(?=$|[^a-zA-Z])/,others:/^[\/~|]/,"\\frac{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\frac{","","","}","{","","","}")},"\\overset{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\overset{","","","}","{","","","}")},"\\underset{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\underset{","","","}","{","","","}")},"\\underbrace{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\underbrace{","","","}_","{","","","}")},"\\color{(...)}0":function(t){return m.patterns.findObserveGroups(t,"\\color{","","","}")},"\\color{(...)}{(...)}1":function(t){return m.patterns.findObserveGroups(t,"\\color{","","","}","{","","","}")},"\\color(...){(...)}2":function(t){return m.patterns.findObserveGroups(t,"\\color","\\","",/^(?=\{)/,"{","","","}")},"\\ce{(...)}":function(t){return m.patterns.findObserveGroups(t,"\\ce{","","","}")},oxidation$:/^(?:[+-][IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,"d-oxidation$":/^(?:[+-]?\s?[IVX]+|\\pm\s*0|\$\\pm\$\s*0)$/,"roman numeral":/^[IVX]+/,"1/2$":/^[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+(?:\$[a-z]\$|[a-z])?$/,amount:function(t){var 
e;if(e=t.match(/^(?:(?:(?:\([+\-]?[0-9]+\/[0-9]+\)|[+\-]?(?:[0-9]+|\$[a-z]\$|[a-z])\/[0-9]+|[+\-]?[0-9]+[.,][0-9]+|[+\-]?\.[0-9]+|[+\-]?[0-9]+)(?:[a-z](?=\s*[A-Z]))?)|[+\-]?[a-z](?=\s*[A-Z])|\+(?!\s))/))return{match_:e[0],remainder:t.substr(e[0].length)};var n=m.patterns.findObserveGroups(t,"","$","$","");return n&&(e=n.match_.match(/^\$(?:\(?[+\-]?(?:[0-9]*[a-z]?[+\-])?[0-9]*[a-z](?:[+\-][0-9]*[a-z]?)?\)?|\+|-)\$$/))?{match_:e[0],remainder:t.substr(e[0].length)}:null},amount2:function(t){return this.amount(t)},"(KV letters),":/^(?:[A-Z][a-z]{0,2}|i)(?=,)/,formula$:function(t){if(t.match(/^\([a-z]+\)$/))return null;var e=t.match(/^(?:[a-z]|(?:[0-9\ \+\-\,\.\(\)]+[a-z])+[0-9\ \+\-\,\.\(\)]*|(?:[a-z][0-9\ \+\-\,\.\(\)]+)+[a-z]?)$/);return e?{match_:e[0],remainder:t.substr(e[0].length)}:null},uprightEntities:/^(?:pH|pOH|pC|pK|iPr|iBu)(?=$|[^a-zA-Z])/,"/":/^\s*(\/)\s*/,"//":/^\s*(\/\/)\s*/,"*":/^\s*[*.]\s*/},findObserveGroups:function(t,e,n,a,o,r,i,c,u,p){function s(t,e){if("string"==typeof e)return 0!==t.indexOf(e)?null:e;var n=t.match(e);return n?n[0]:null}var _=s(t,e);if(null===_)return null;if(t=t.substr(_.length),null===(_=s(t,n)))return null;var l=function(t,e,n){for(var a=0;e<t.length;){var o=t.charAt(e),r=s(t.substr(e),n);if(null!==r&&0===a)return{endMatchBegin:e,endMatchEnd:e+r.length};if("{"===o)a++;else if("}"===o){if(0===a)throw["ExtraCloseMissingOpen","Extra close brace or missing open brace"];a--}e++}return null}(t,_.length,a||o);if(null===l)return null;var d=t.substring(0,a?l.endMatchEnd:l.endMatchBegin);if(r||i){var m=this.findObserveGroups(t.substr(l.endMatchEnd),r,i,c,u);if(null===m)return null;var h=[d,m.match_];return{match_:p?h.join(""):h,remainder:m.remainder}}return{match_:d,remainder:t.substr(l.endMatchEnd)}},match_:function(t,e){var n=m.patterns.patterns[t];if(void 0===n)throw["MhchemBugP","mhchem bug P. Please report. 
("+t+")"];if("function"==typeof n)return m.patterns.patterns[t](e);var a=e.match(n);return a?{match_:a[2]?[a[1],a[2]]:a[1]?a[1]:a[0],remainder:e.substr(a[0].length)}:null}},actions:{"a=":function(t,e){t.a=(t.a||"")+e},"b=":function(t,e){t.b=(t.b||"")+e},"p=":function(t,e){t.p=(t.p||"")+e},"o=":function(t,e){t.o=(t.o||"")+e},"q=":function(t,e){t.q=(t.q||"")+e},"d=":function(t,e){t.d=(t.d||"")+e},"rm=":function(t,e){t.rm=(t.rm||"")+e},"text=":function(t,e){t.text_=(t.text_||"")+e},insert:function(t,e,n){return{type_:n}},"insert+p1":function(t,e,n){return{type_:n,p1:e}},"insert+p1+p2":function(t,e,n){return{type_:n,p1:e[0],p2:e[1]}},copy:function(t,e){return e},rm:function(t,e){return{type_:"rm",p1:e||""}},text:function(t,e){return m.go(e,"text")},"{text}":function(t,e){var n=["{"];return m.concatArray(n,m.go(e,"text")),n.push("}"),n},"tex-math":function(t,e){return m.go(e,"tex-math")},"tex-math tight":function(t,e){return m.go(e,"tex-math tight")},bond:function(t,e,n){return{type_:"bond",kind_:n||e}},"color0-output":function(t,e){return{type_:"color0",color:e[0]}},ce:function(t,e){return m.go(e)},"1/2":function(t,e){var n=[];e.match(/^[+\-]/)&&(n.push(e.substr(0,1)),e=e.substr(1));var a=e.match(/^([0-9]+|\$[a-z]\$|[a-z])\/([0-9]+)(\$[a-z]\$|[a-z])?$/);return a[1]=a[1].replace(/\$/g,""),n.push({type_:"frac",p1:a[1],p2:a[2]}),a[3]&&(a[3]=a[3].replace(/\$/g,""),n.push({type_:"tex-math",p1:a[3]})),n},"9,9":function(t,e){return m.go(e,"9,9")}},createTransitions:function(t){var e,n,a,o,r={};for(e in t)for(n in t[e])for(a=n.split("|"),t[e][n].stateArray=a,o=0;o<a.length;o++)r[a[o]]=[];for(e in t)for(n in t[e])for(a=t[e][n].stateArray||[],o=0;o<a.length;o++){var i=t[e][n];if(i.action_){i.action_=[].concat(i.action_);for(var c=0;c<i.action_.length;c++)"string"==typeof i.action_[c]&&(i.action_[c]={type_:i.action_[c]})}else i.action_=[];for(var u=e.split("|"),p=0;p<u.length;p++)if("*"===a[o])for(var s in r)r[s].push({pattern:u[p],task:i});else r[a[o]].push({pattern:u[p],task:i})}return r},stateMachines:{}};m.stateMachines={ce:{transitions:m.createTransitions({empty:{"*":{action_:"output"}},else:{"0|1|2":{action_:"beginsWithBond=false",revisit:!0,toContinue:!0}},oxidation$:{0:{action_:"oxidation-output"}},CMT:{r:{action_:"rdt=",nextState:"rt"},rd:{action_:"rqt=",nextState:"rdt"}},arrowUpDown:{"0|1|2|as":{action_:["sb=false","output","operator"],nextState:"1"}},uprightEntities:{"0|1|2":{action_:["o=","output"],nextState:"1"}},orbital:{"0|1|2|3":{action_:"o=",nextState:"o"}},"->":{"0|1|2|3":{action_:"r=",nextState:"r"},"a|as":{action_:["output","r="],nextState:"r"},"*":{action_:["output","r="],nextState:"r"}},"+":{o:{action_:"d= kv",nextState:"d"},"d|D":{action_:"d=",nextState:"d"},q:{action_:"d=",nextState:"qd"},"qd|qD":{action_:"d=",nextState:"qd"},dq:{action_:["output","d="],nextState:"d"},3:{action_:["sb=false","output","operator"],nextState:"0"}},amount:{"0|2":{action_:"a=",nextState:"a"}},"pm-operator":{"0|1|2|a|as":{action_:["sb=false","output",{type_:"operator",option:"\\pm"}],nextState:"0"}},operator:{"0|1|2|a|as":{action_:["sb=false","output","operator"],nextState:"0"}},"-$":{"o|q":{action_:["charge or bond","output"],nextState:"qd"},d:{action_:"d=",nextState:"d"},D:{action_:["output",{type_:"bond",option:"-"}],nextState:"3"},q:{action_:"d=",nextState:"qd"},qd:{action_:"d=",nextState:"qd"},"qD|dq":{action_:["output",{type_:"bond",option:"-"}],nextState:"3"}},"-9":{"3|o":{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"3"}},"- orbital 
overlap":{o:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"},d:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"}},"-":{"0|1|2":{action_:[{type_:"output",option:1},"beginsWithBond=true",{type_:"bond",option:"-"}],nextState:"3"},3:{action_:{type_:"bond",option:"-"}},a:{action_:["output",{type_:"insert",option:"hyphen"}],nextState:"2"},as:{action_:[{type_:"output",option:2},{type_:"bond",option:"-"}],nextState:"3"},b:{action_:"b="},o:{action_:{type_:"- after o/d",option:!1},nextState:"2"},q:{action_:{type_:"- after o/d",option:!1},nextState:"2"},"d|qd|dq":{action_:{type_:"- after o/d",option:!0},nextState:"2"},"D|qD|p":{action_:["output",{type_:"bond",option:"-"}],nextState:"3"}},amount2:{"1|3":{action_:"a=",nextState:"a"}},letters:{"0|1|2|3|a|as|b|p|bp|o":{action_:"o=",nextState:"o"},"q|dq":{action_:["output","o="],nextState:"o"},"d|D|qd|qD":{action_:"o after d",nextState:"o"}},digits:{o:{action_:"q=",nextState:"q"},"d|D":{action_:"q=",nextState:"dq"},q:{action_:["output","o="],nextState:"o"},a:{action_:"o=",nextState:"o"}},"space A":{"b|p|bp":{}},space:{a:{nextState:"as"},0:{action_:"sb=false"},"1|2":{action_:"sb=true"},"r|rt|rd|rdt|rdq":{action_:"output",nextState:"0"},"*":{action_:["output","sb=true"],nextState:"1"}},"1st-level escape":{"1|2":{action_:["output",{type_:"insert+p1",option:"1st-level escape"}]},"*":{action_:["output",{type_:"insert+p1",option:"1st-level escape"}],nextState:"0"}},"[(...)]":{"r|rt":{action_:"rd=",nextState:"rd"},"rd|rdt":{action_:"rq=",nextState:"rdq"}},"...":{"o|d|D|dq|qd|qD":{action_:["output",{type_:"bond",option:"..."}],nextState:"3"},"*":{action_:[{type_:"output",option:1},{type_:"insert",option:"ellipsis"}],nextState:"1"}},". |* ":{"*":{action_:["output",{type_:"insert",option:"addition compound"}],nextState:"1"}},"state of aggregation $":{"*":{action_:["output","state of aggregation"],nextState:"1"}},"{[(":{"a|as|o":{action_:["o=","output","parenthesisLevel++"],nextState:"2"},"0|1|2|3":{action_:["o=","output","parenthesisLevel++"],nextState:"2"},"*":{action_:["output","o=","output","parenthesisLevel++"],nextState:"2"}},")]}":{"0|1|2|3|b|p|bp|o":{action_:["o=","parenthesisLevel--"],nextState:"o"},"a|as|d|D|q|qd|qD|dq":{action_:["output","o=","parenthesisLevel--"],nextState:"o"}},", ":{"*":{action_:["output","comma"],nextState:"0"}},"^_":{"*":{}},"^{(...)}|^($...$)":{"0|1|2|as":{action_:"b=",nextState:"b"},p:{action_:"b=",nextState:"bp"},"3|o":{action_:"d= kv",nextState:"D"},q:{action_:"d=",nextState:"qD"},"d|D|qd|qD|dq":{action_:["output","d="],nextState:"D"}},"^a|^\\x{}{}|^\\x{}|^\\x|'":{"0|1|2|as":{action_:"b=",nextState:"b"},p:{action_:"b=",nextState:"bp"},"3|o":{action_:"d= kv",nextState:"d"},q:{action_:"d=",nextState:"qd"},"d|qd|D|qD":{action_:"d="},dq:{action_:["output","d="],nextState:"d"}},"_{(state of 
aggregation)}$":{"d|D|q|qd|qD|dq":{action_:["output","q="],nextState:"q"}},"_{(...)}|_($...$)|_9|_\\x{}{}|_\\x{}|_\\x":{"0|1|2|as":{action_:"p=",nextState:"p"},b:{action_:"p=",nextState:"bp"},"3|o":{action_:"q=",nextState:"q"},"d|D":{action_:"q=",nextState:"dq"},"q|qd|qD|dq":{action_:["output","q="],nextState:"q"}},"=<>":{"0|1|2|3|a|as|o|q|d|D|qd|qD|dq":{action_:[{type_:"output",option:2},"bond"],nextState:"3"}},"#":{"0|1|2|3|a|as|o":{action_:[{type_:"output",option:2},{type_:"bond",option:"#"}],nextState:"3"}},"{}":{"*":{action_:{type_:"output",option:1},nextState:"1"}},"{...}":{"0|1|2|3|a|as|b|p|bp":{action_:"o=",nextState:"o"},"o|d|D|q|qd|qD|dq":{action_:["output","o="],nextState:"o"}},"$...$":{a:{action_:"a="},"0|1|2|3|as|b|p|bp|o":{action_:"o=",nextState:"o"},"as|o":{action_:"o="},"q|d|D|qd|qD|dq":{action_:["output","o="],nextState:"o"}},"\\bond{(...)}":{"*":{action_:[{type_:"output",option:2},"bond"],nextState:"3"}},"\\frac{(...)}":{"*":{action_:[{type_:"output",option:1},"frac-output"],nextState:"3"}},"\\overset{(...)}":{"*":{action_:[{type_:"output",option:2},"overset-output"],nextState:"3"}},"\\underset{(...)}":{"*":{action_:[{type_:"output",option:2},"underset-output"],nextState:"3"}},"\\underbrace{(...)}":{"*":{action_:[{type_:"output",option:2},"underbrace-output"],nextState:"3"}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:[{type_:"output",option:2},"color-output"],nextState:"3"}},"\\color{(...)}0":{"*":{action_:[{type_:"output",option:2},"color0-output"]}},"\\ce{(...)}":{"*":{action_:[{type_:"output",option:2},"ce"],nextState:"3"}},"\\,":{"*":{action_:[{type_:"output",option:1},"copy"],nextState:"1"}},"\\x{}{}|\\x{}|\\x":{"0|1|2|3|a|as|b|p|bp|o|c0":{action_:["o=","output"],nextState:"3"},"*":{action_:["output","o=","output"],nextState:"3"}},others:{"*":{action_:[{type_:"output",option:1},"copy"],nextState:"3"}},else2:{a:{action_:"a to o",nextState:"o",revisit:!0},as:{action_:["output","sb=true"],nextState:"1",revisit:!0},"r|rt|rd|rdt|rdq":{action_:["output"],nextState:"0",revisit:!0},"*":{action_:["output","copy"],nextState:"3"}}}),actions:{"o after d":function(t,e){var n;if((t.d||"").match(/^[0-9]+$/)){var a=t.d;t.d=void 0,n=this.output(t),t.b=a}else n=this.output(t);return m.actions["o="](t,e),n},"d= kv":function(t,e){t.d=e,t.dType="kv"},"charge or bond":function(t,e){if(t.beginsWithBond){var n=[];return m.concatArray(n,this.output(t)),m.concatArray(n,m.actions.bond(t,e,"-")),n}t.d=e},"- after o/d":function(t,e,n){var a=m.patterns.match_("orbital",t.o||""),o=m.patterns.match_("one lowercase greek letter $",t.o||""),r=m.patterns.match_("one lowercase latin letter $",t.o||""),i=m.patterns.match_("$one lowercase latin letter$ $",t.o||""),c="-"===e&&(a&&""===a.remainder||o||r||i);!c||t.a||t.b||t.p||t.d||t.q||a||!r||(t.o="$"+t.o+"$");var u=[];return c?(m.concatArray(u,this.output(t)),u.push({type_:"hyphen"})):(a=m.patterns.match_("digits",t.d||""),n&&a&&""===a.remainder?(m.concatArray(u,m.actions["d="](t,e)),m.concatArray(u,this.output(t))):(m.concatArray(u,this.output(t)),m.concatArray(u,m.actions.bond(t,e,"-")))),u},"a to o":function(t){t.o=t.a,t.a=void 0},"sb=true":function(t){t.sb=!0},"sb=false":function(t){t.sb=!1},"beginsWithBond=true":function(t){t.beginsWithBond=!0},"beginsWithBond=false":function(t){t.beginsWithBond=!1},"parenthesisLevel++":function(t){t.parenthesisLevel++},"parenthesisLevel--":function(t){t.parenthesisLevel--},"state of aggregation":function(t,e){return{type_:"state of aggregation",p1:m.go(e,"o")}},comma:function(t,e){var 
n=e.replace(/\s*$/,"");return n!==e&&0===t.parenthesisLevel?{type_:"comma enumeration L",p1:n}:{type_:"comma enumeration M",p1:n}},output:function(t,e,n){var a,o,r;t.r?(o="M"===t.rdt?m.go(t.rd,"tex-math"):"T"===t.rdt?[{type_:"text",p1:t.rd||""}]:m.go(t.rd),r="M"===t.rqt?m.go(t.rq,"tex-math"):"T"===t.rqt?[{type_:"text",p1:t.rq||""}]:m.go(t.rq),a={type_:"arrow",r:t.r,rd:o,rq:r}):(a=[],(t.a||t.b||t.p||t.o||t.q||t.d||n)&&(t.sb&&a.push({type_:"entitySkip"}),t.o||t.q||t.d||t.b||t.p||2===n?t.o||t.q||t.d||!t.b&&!t.p?t.o&&"kv"===t.dType&&m.patterns.match_("d-oxidation$",t.d||"")?t.dType="oxidation":t.o&&"kv"===t.dType&&!t.q&&(t.dType=void 0):(t.o=t.a,t.d=t.b,t.q=t.p,t.a=t.b=t.p=void 0):(t.o=t.a,t.a=void 0),a.push({type_:"chemfive",a:m.go(t.a,"a"),b:m.go(t.b,"bd"),p:m.go(t.p,"pq"),o:m.go(t.o,"o"),q:m.go(t.q,"pq"),d:m.go(t.d,"oxidation"===t.dType?"oxidation":"bd"),dType:t.dType})));for(var i in t)"parenthesisLevel"!==i&&"beginsWithBond"!==i&&delete t[i];return a},"oxidation-output":function(t,e){var n=["{"];return m.concatArray(n,m.go(e,"oxidation")),n.push("}"),n},"frac-output":function(t,e){return{type_:"frac-ce",p1:m.go(e[0]),p2:m.go(e[1])}},"overset-output":function(t,e){return{type_:"overset",p1:m.go(e[0]),p2:m.go(e[1])}},"underset-output":function(t,e){return{type_:"underset",p1:m.go(e[0]),p2:m.go(e[1])}},"underbrace-output":function(t,e){return{type_:"underbrace",p1:m.go(e[0]),p2:m.go(e[1])}},"color-output":function(t,e){return{type_:"color",color1:e[0],color2:m.go(e[1])}},"r=":function(t,e){t.r=e},"rdt=":function(t,e){t.rdt=e},"rd=":function(t,e){t.rd=e},"rqt=":function(t,e){t.rqt=e},"rq=":function(t,e){t.rq=e},operator:function(t,e,n){return{type_:"operator",kind_:n||e}}}},a:{transitions:m.createTransitions({empty:{"*":{}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"1",revisit:!0}},"$(...)$":{"*":{action_:"tex-math tight",nextState:"1"}},",":{"*":{action_:{type_:"insert",option:"commaDecimal"}}},else2:{"*":{action_:"copy"}}}),actions:{}},o:{transitions:m.createTransitions({empty:{"*":{}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"1",revisit:!0}},letters:{"*":{action_:"rm"}},"\\ca":{"*":{action_:{type_:"insert",option:"circa"}}},"\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"{text}"}},else2:{"*":{action_:"copy"}}}),actions:{}},text:{transitions:m.createTransitions({empty:{"*":{action_:"output"}},"{...}":{"*":{action_:"text="}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"\\greek":{"*":{action_:["output","rm"]}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:["output","copy"]}},else:{"*":{action_:"text="}}}),actions:{output:function(t){if(t.text_){var e={type_:"text",p1:t.text_};for(var n in t)delete t[n];return e}}}},pq:{transitions:m.createTransitions({empty:{"*":{}},"state of aggregation $":{"*":{action_:"state of aggregation"}},i$:{0:{nextState:"!f",revisit:!0}},"(KV letters),":{0:{action_:"rm",nextState:"0"}},formula$:{0:{nextState:"f",revisit:!0}},"1/2$":{0:{action_:"1/2"}},else:{0:{nextState:"!f",revisit:!0}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"text"}},"a-z":{f:{action_:"tex-math"}},letters:{"*":{action_:"rm"}},"-9.,9":{"*":{action_:"9,9"}},",":{"*":{action_:{type_:"insert+p1",option:"comma enumeration S"}}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:"color-output"}},"\\color{(...)}0":{"*":{action_:"color0-output"}},"\\ce{(...)}":{"*":{action_:"ce"}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},else2:{"*":{action_:"copy"}}}),actions:{"state of 
aggregation":function(t,e){return{type_:"state of aggregation subscript",p1:m.go(e,"o")}},"color-output":function(t,e){return{type_:"color",color1:e[0],color2:m.go(e[1],"pq")}}}},bd:{transitions:m.createTransitions({empty:{"*":{}},x$:{0:{nextState:"!f",revisit:!0}},formula$:{0:{nextState:"f",revisit:!0}},else:{0:{nextState:"!f",revisit:!0}},"-9.,9 no missing 0":{"*":{action_:"9,9"}},".":{"*":{action_:{type_:"insert",option:"electron dot"}}},"a-z":{f:{action_:"tex-math"}},x:{"*":{action_:{type_:"insert",option:"KV x"}}},letters:{"*":{action_:"rm"}},"'":{"*":{action_:{type_:"insert",option:"prime"}}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},"{(...)}":{"*":{action_:"text"}},"\\color{(...)}{(...)}1|\\color(...){(...)}2":{"*":{action_:"color-output"}},"\\color{(...)}0":{"*":{action_:"color0-output"}},"\\ce{(...)}":{"*":{action_:"ce"}},"\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"copy"}},else2:{"*":{action_:"copy"}}}),actions:{"color-output":function(t,e){return{type_:"color",color1:e[0],color2:m.go(e[1],"bd")}}}},oxidation:{transitions:m.createTransitions({empty:{"*":{}},"roman numeral":{"*":{action_:"roman-numeral"}},"${(...)}$|$(...)$":{"*":{action_:"tex-math"}},else:{"*":{action_:"copy"}}}),actions:{"roman-numeral":function(t,e){return{type_:"roman numeral",p1:e||""}}}},"tex-math":{transitions:m.createTransitions({empty:{"*":{action_:"output"}},"\\ce{(...)}":{"*":{action_:["output","ce"]}},"{...}|\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"o="}},else:{"*":{action_:"o="}}}),actions:{output:function(t){if(t.o){var e={type_:"tex-math",p1:t.o};for(var n in t)delete t[n];return e}}}},"tex-math tight":{transitions:m.createTransitions({empty:{"*":{action_:"output"}},"\\ce{(...)}":{"*":{action_:["output","ce"]}},"{...}|\\,|\\x{}{}|\\x{}|\\x":{"*":{action_:"o="}},"-|+":{"*":{action_:"tight operator"}},else:{"*":{action_:"o="}}}),actions:{"tight operator":function(t,e){t.o=(t.o||"")+"{"+e+"}"},output:function(t){if(t.o){var e={type_:"tex-math",p1:t.o};for(var n in t)delete t[n];return e}}}},"9,9":{transitions:m.createTransitions({empty:{"*":{}},",":{"*":{action_:"comma"}},else:{"*":{action_:"copy"}}}),actions:{comma:function(){return{type_:"commaDecimal"}}}},pu:{transitions:m.createTransitions({empty:{"*":{action_:"output"}},space$:{"*":{action_:["output","space"]}},"{[(|)]}":{"0|a":{action_:"copy"}},"(-)(9)^(-9)":{0:{action_:"number^",nextState:"a"}},"(-)(9.,9)(e)(99)":{0:{action_:"enumber",nextState:"a"}},space:{"0|a":{}},"pm-operator":{"0|a":{action_:{type_:"operator",option:"\\pm"},nextState:"0"}},operator:{"0|a":{action_:"copy",nextState:"0"}},"//":{d:{action_:"o=",nextState:"/"}},"/":{d:{action_:"o=",nextState:"/"}},"{...}|else":{"0|d":{action_:"d=",nextState:"d"},a:{action_:["space","d="],nextState:"d"},"/|q":{action_:"q=",nextState:"q"}}}),actions:{enumber:function(t,e){var n=[];return"+-"===e[0]||"+/-"===e[0]?n.push("\\pm "):e[0]&&n.push(e[0]),e[1]&&(m.concatArray(n,m.go(e[1],"pu-9,9")),e[2]&&(e[2].match(/[,.]/)?m.concatArray(n,m.go(e[2],"pu-9,9")):n.push(e[2])),e[3]=e[4]||e[3],e[3]&&(e[3]=e[3].trim(),"e"===e[3]||"*"===e[3].substr(0,1)?n.push({type_:"cdot"}):n.push({type_:"times"}))),e[3]&&n.push("10^{"+e[5]+"}"),n},"number^":function(t,e){var n=[];return"+-"===e[0]||"+/-"===e[0]?n.push("\\pm "):e[0]&&n.push(e[0]),m.concatArray(n,m.go(e[1],"pu-9,9")),n.push("^{"+e[2]+"}"),n},operator:function(t,e,n){return{type_:"operator",kind_:n||e}},space:function(){return{type_:"pu-space-1"}},output:function(t){var e,n=m.patterns.match_("{(...)}",t.d||"");n&&""===n.remainder&&(t.d=n.match_);var 
a=m.patterns.match_("{(...)}",t.q||"");if(a&&""===a.remainder&&(t.q=a.match_),t.d&&(t.d=t.d.replace(/\u00B0C|\^oC|\^{o}C/g,"{}^{\\circ}C"),t.d=t.d.replace(/\u00B0F|\^oF|\^{o}F/g,"{}^{\\circ}F")),t.q){t.q=t.q.replace(/\u00B0C|\^oC|\^{o}C/g,"{}^{\\circ}C"),t.q=t.q.replace(/\u00B0F|\^oF|\^{o}F/g,"{}^{\\circ}F");var o={d:m.go(t.d,"pu"),q:m.go(t.q,"pu")};"//"===t.o?e={type_:"pu-frac",p1:o.d,p2:o.q}:(1<(e=o.d).length||1<o.q.length?e.push({type_:" / "}):e.push({type_:"/"}),m.concatArray(e,o.q))}else e=m.go(t.d,"pu-2");for(var r in t)delete t[r];return e}}},"pu-2":{transitions:m.createTransitions({empty:{"*":{action_:"output"}},"*":{"*":{action_:["output","cdot"],nextState:"0"}},"\\x":{"*":{action_:"rm="}},space:{"*":{action_:["output","space"],nextState:"0"}},"^{(...)}|^(-1)":{1:{action_:"^(-1)"}},"-9.,9":{0:{action_:"rm=",nextState:"0"},1:{action_:"^(-1)",nextState:"0"}},"{...}|else":{"*":{action_:"rm=",nextState:"1"}}}),actions:{cdot:function(){return{type_:"tight cdot"}},"^(-1)":function(t,e){t.rm+="^{"+e+"}"},space:function(){return{type_:"pu-space-2"}},output:function(t){var e=[];if(t.rm){var n=m.patterns.match_("{(...)}",t.rm||"");e=n&&""===n.remainder?m.go(n.match_,"pu"):{type_:"rm",p1:t.rm}}for(var a in t)delete t[a];return e}}},"pu-9,9":{transitions:m.createTransitions({empty:{0:{action_:"output-0"},o:{action_:"output-o"}},",":{0:{action_:["output-0","comma"],nextState:"o"}},".":{0:{action_:["output-0","copy"],nextState:"o"}},else:{"*":{action_:"text="}}}),actions:{comma:function(){return{type_:"commaDecimal"}},"output-0":function(t){var e=[];if(t.text_=t.text_||"",4<t.text_.length){var n=t.text_.length%3;0===n&&(n=3);for(var a=t.text_.length-3;0<a;a-=3)e.push(t.text_.substr(a,3)),e.push({type_:"1000 separator"});e.push(t.text_.substr(0,n)),e.reverse()}else e.push(t.text_);for(var o in t)delete t[o];return e},"output-o":function(t){var e=[];if(t.text_=t.text_||"",4<t.text_.length){for(var n=t.text_.length-3,a=0;a<n;a+=3)e.push(t.text_.substr(a,3)),e.push({type_:"1000 separator"});e.push(t.text_.substr(a))}else e.push(t.text_);for(var o in t)delete t[o];return e}}}};var u={go:function(t,e){if(!t)return"";for(var n="",a=!1,o=0;o<t.length;o++){var r=t[o];"string"==typeof r?n+=r:(n+=u._go2(r),"1st-level escape"===r.type_&&(a=!0))}return e||a||!n||(n="{"+n+"}"),n},_goInner:function(t){return t?u.go(t,!0):t},_go2:function(t){var e;switch(t.type_){case"chemfive":e="";var n={a:u._goInner(t.a),b:u._goInner(t.b),p:u._goInner(t.p),o:u._goInner(t.o),q:u._goInner(t.q),d:u._goInner(t.d)};n.a&&(n.a.match(/^[+\-]/)&&(n.a="{"+n.a+"}"),e+=n.a+"\\,"),(n.b||n.p)&&(e+="{\\vphantom{X}}",e+="^{\\hphantom{"+(n.b||"")+"}}_{\\hphantom{"+(n.p||"")+"}}",e+="{\\vphantom{X}}",e+="^{\\smash[t]{\\vphantom{2}}\\llap{"+(n.b||"")+"}}",e+="_{\\vphantom{2}\\llap{\\smash[t]{"+(n.p||"")+"}}}"),n.o&&(n.o.match(/^[+\-]/)&&(n.o="{"+n.o+"}"),e+=n.o),"kv"===t.dType?((n.d||n.q)&&(e+="{\\vphantom{X}}"),n.d&&(e+="^{"+n.d+"}"),n.q&&(e+="_{\\smash[t]{"+n.q+"}}")):"oxidation"===t.dType?(n.d&&(e+="{\\vphantom{X}}",e+="^{"+n.d+"}"),n.q&&(e+="{\\vphantom{X}}",e+="_{\\smash[t]{"+n.q+"}}")):(n.q&&(e+="{\\vphantom{X}}",e+="_{\\smash[t]{"+n.q+"}}"),n.d&&(e+="{\\vphantom{X}}",e+="^{"+n.d+"}"));break;case"rm":e="\\mathrm{"+t.p1+"}";break;case"text":e=t.p1.match(/[\^_]/)?(t.p1=t.p1.replace(" ","~").replace("-","\\text{-}"),"\\mathrm{"+t.p1+"}"):"\\text{"+t.p1+"}";break;case"roman numeral":e="\\mathrm{"+t.p1+"}";break;case"state of aggregation":e="\\mskip2mu "+u._goInner(t.p1);break;case"state of aggregation subscript":e="\\mskip1mu 
"+u._goInner(t.p1);break;case"bond":if(!(e=u._getBond(t.kind_)))throw["MhchemErrorBond","mhchem Error. Unknown bond type ("+t.kind_+")"];break;case"frac":var a="\\frac{"+t.p1+"}{"+t.p2+"}";e="\\mathchoice{\\textstyle"+a+"}{"+a+"}{"+a+"}{"+a+"}";break;case"pu-frac":var o="\\frac{"+u._goInner(t.p1)+"}{"+u._goInner(t.p2)+"}";e="\\mathchoice{\\textstyle"+o+"}{"+o+"}{"+o+"}{"+o+"}";break;case"tex-math":e=t.p1+" ";break;case"frac-ce":e="\\frac{"+u._goInner(t.p1)+"}{"+u._goInner(t.p2)+"}";break;case"overset":e="\\overset{"+u._goInner(t.p1)+"}{"+u._goInner(t.p2)+"}";break;case"underset":e="\\underset{"+u._goInner(t.p1)+"}{"+u._goInner(t.p2)+"}";break;case"underbrace":e="\\underbrace{"+u._goInner(t.p1)+"}_{"+u._goInner(t.p2)+"}";break;case"color":e="{\\color{"+t.color1+"}{"+u._goInner(t.color2)+"}}";break;case"color0":e="\\color{"+t.color+"}";break;case"arrow":var r=u._goInner(t.rd),i=u._goInner(t.rq),c=u._getArrow(t.r);e=c=r||i?"<=>"===t.r||"<=>>"===t.r||"<<=>"===t.r||"<--\x3e"===t.r?(c="\\long"+c,r&&(c="\\overset{"+r+"}{"+c+"}"),i&&(c="\\underset{\\lower7mu{"+i+"}}{"+c+"}")," {}\\mathrel{"+c+"}{} "):(i&&(c+="[{"+i+"}]")," {}\\mathrel{\\x"+(c+="{"+r+"}")+"}{} "):" {}\\mathrel{\\long"+c+"}{} ";break;case"operator":e=u._getOperator(t.kind_);break;case"1st-level escape":e=t.p1+" ";break;case"space":e=" ";break;case"entitySkip":case"pu-space-1":e="~";break;case"pu-space-2":e="\\mkern3mu ";break;case"1000 separator":e="\\mkern2mu ";break;case"commaDecimal":e="{,}";break;case"comma enumeration L":e="{"+t.p1+"}\\mkern6mu ";break;case"comma enumeration M":e="{"+t.p1+"}\\mkern3mu ";break;case"comma enumeration S":e="{"+t.p1+"}\\mkern1mu ";break;case"hyphen":e="\\text{-}";break;case"addition compound":e="\\,{\\cdot}\\,";break;case"electron dot":e="\\mkern1mu \\bullet\\mkern1mu ";break;case"KV x":e="{\\times}";break;case"prime":e="\\prime ";break;case"cdot":e="\\cdot ";break;case"tight cdot":e="\\mkern1mu{\\cdot}\\mkern1mu ";break;case"times":e="\\times ";break;case"circa":e="{\\sim}";break;case"^":e="uparrow";break;case"v":e="downarrow";break;case"ellipsis":e="\\ldots ";break;case"/":e="/";break;case" / ":e="\\,/\\,";break;default:throw["MhchemBugT","mhchem bug T. Please report."]}return e},_getArrow:function(t){switch(t){case"->":case"\u2192":case"\u27f6":return"rightarrow";case"<-":return"leftarrow";case"<->":return"leftrightarrow";case"<--\x3e":return"leftrightarrows";case"<=>":case"\u21cc":return"rightleftharpoons";case"<=>>":return"Rightleftharpoons";case"<<=>":return"Leftrightharpoons";default:throw["MhchemBugT","mhchem bug T. Please report."]}},_getBond:function(t){switch(t){case"-":case"1":return"{-}";case"=":case"2":return"{=}";case"#":case"3":return"{\\equiv}";case"~":return"{\\tripledash}";case"~-":return"{\\rlap{\\lower.1em{-}}\\raise.1em{\\tripledash}}";case"~=":case"~--":return"{\\rlap{\\lower.2em{-}}\\rlap{\\raise.2em{\\tripledash}}-}";case"-~-":return"{\\rlap{\\lower.2em{-}}\\rlap{\\raise.2em{-}}\\tripledash}";case"...":return"{{\\cdot}{\\cdot}{\\cdot}}";case"....":return"{{\\cdot}{\\cdot}{\\cdot}{\\cdot}}";case"->":return"{\\rightarrow}";case"<-":return"{\\leftarrow}";case"<":return"{<}";case">":return"{>}";default:throw["MhchemBugT","mhchem bug T. 
Please report."]}},_getOperator:function(t){switch(t){case"+":return" {}+{} ";case"-":return" {}-{} ";case"=":return" {}={} ";case"<":return" {}<{} ";case">":return" {}>{} ";case"<<":return" {}\\ll{} ";case">>":return" {}\\gg{} ";case"\\pm":return" {}\\pm{} ";case"\\approx":case"$\\approx$":return" {}\\approx{} ";case"v":case"(v)":return" \\downarrow{} ";case"^":case"(^)":return" \\uparrow{} ";default:throw["MhchemBugT","mhchem bug T. Please report."]}}};function a(t){}function o(t){}e.mhchemParser=m,e.texify=u,e.assertNever=a,e.assertString=o}]); | PypiClean |
/HavNegpy-1.2.tar.gz/HavNegpy-1.2/docs/_build/doctrees/nbsphinx/_build/doctrees/nbsphinx/_build/html/_build/html/_build/html/hn_module_tutorial.ipynb

# Tutorial for the HN module of HavNegpy package
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import HavNegpy as dd
%matplotlib qt
os.chdir(r'M:\Marshall_Data\mohamed_data\mohamed_data\n44')
def create_dataframe(f):
col_names = ['Freq', 'T', 'Eps1', 'Eps2']
#f = input(str("Enter the filename:"))
df = pd.read_csv(f, sep=r"\s+",index_col=False,usecols = [0,1,2,3],names=col_names,header=None,skiprows=4,encoding='unicode_escape',engine='python')
col1 = ['log f']
for start in range(0, len(df), 63):
name = df['T'][start]
#print(name)
col1.append(name)
df2 = pd.DataFrame()
f1 = df['Freq'][0:63].values
x1 = np.log10((f1))
e = pd.DataFrame(x1)
df2['log f'] = pd.concat([e],axis=1,ignore_index=True)
global Cooling,Heating
for start in range(0, len(df), 63):
f = df['Eps2'][start:start+63].values
ep = np.log10(f)
d = pd.DataFrame(ep)
df2[start] = pd.concat([d],axis=1,ignore_index=True)
df2.columns = col1
'''
a = int(len(col1)/3)
b = 2*a
c = int(len(col1)) - b
Heating1 = df2.iloc[8:,0:a+1]
Cooling = df2.iloc[8:,a+1:b+1]
Heating2 = df2.iloc[8:,b+1:]
heat1_col = col1[0:a+1]
cool_col = col1[a+1:b+1]
heat2_col = col1[b+1:]
Cooling.columns = cool_col
Heating1.columns = heat1_col
Heating2.columns = heat2_col
f2 = df['Freq'][8:59].values
x2 = np.log10((f2))
Cooling['Freq'] = x2
Heating1['Freq'] = x2
Heating2['Freq'] = x2
'''
Cooling = df2.iloc[:,0:25]
Heating = df2.iloc[:,25:]
return df,df2,Cooling,Heating #Heating2
df,df2,cool,heat = create_dataframe('EPS.TXT')
x,y = df2['log f'][9:], heat[40][9:]
plt.figure()
plt.scatter(x,y,label='data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('Example for HN fitting')
```
image of the plot we are using in this tutorial

```
''' instantiate the HN module from HavNegpy'''
hn = dd.HN()
''' select range to perform hn fitting'''
''' the select_range function pops up a separate window and allows two clicks to select the region of interest (ROI)'''
''' In this tutorial, I'll plot the ROI and append it as an image in the next cell'''
x1,y1 = hn.select_range(x,y)
''' view the data from select range'''
plt.scatter(x1,y1,label = 'Data for fitting')
plt.xlabel('log f [Hz]')
plt.ylabel('log $\epsilon$"')
plt.legend()
plt.title('ROI selected from HN module')
```
image of the ROI from HN module
```
''' dump the initial guess parameters using the dump_parameters method (varies for each fit function), which writes the parameters to a json file'''
''' this is required before performing the first fit, as the initial guess is taken from the json file created'''
hn.dump_parameters_hn()
''' view the initial guess for the ROI using the initial_view method'''
''' I'll append the image in the next cell'''
hn.initial_view_hn(x1,y1)
```
image of the initial guess
```
''' perform least squares fitting'''
''' The image of the curve fit is added in the next cell '''
hn.fit(x1,y1)
```
Example of the fit performed using a single HN function.
The procedure is similar for double HN and HN with conductivity.

```
'''create a file to save fit results using the create_analysis_file method'''
''' before saving fit results, an analysis file has to be created '''
hn.create_analysis_file()
''' save the fit results using the save_fit method of the corresponding fit function'''
''' takes one argument; read more in the documentation'''
hn.save_fit_hn(1)
```
| PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/readable-stream/lib/internal/streams/end-of-stream.js
'use strict';
var ERR_STREAM_PREMATURE_CLOSE = require('../../../errors').codes.ERR_STREAM_PREMATURE_CLOSE;
function once(callback) {
var called = false;
return function () {
if (called) return;
called = true;
for (var _len = arguments.length, args = new Array(_len), _key = 0; _key < _len; _key++) {
args[_key] = arguments[_key];
}
callback.apply(this, args);
};
}
function noop() {}
function isRequest(stream) {
return stream.setHeader && typeof stream.abort === 'function';
}
function eos(stream, opts, callback) {
if (typeof opts === 'function') return eos(stream, null, opts);
if (!opts) opts = {};
callback = once(callback || noop);
var readable = opts.readable || opts.readable !== false && stream.readable;
var writable = opts.writable || opts.writable !== false && stream.writable;
var onlegacyfinish = function onlegacyfinish() {
if (!stream.writable) onfinish();
};
var writableEnded = stream._writableState && stream._writableState.finished;
var onfinish = function onfinish() {
writable = false;
writableEnded = true;
if (!readable) callback.call(stream);
};
var readableEnded = stream._readableState && stream._readableState.endEmitted;
var onend = function onend() {
readable = false;
readableEnded = true;
if (!writable) callback.call(stream);
};
var onerror = function onerror(err) {
callback.call(stream, err);
};
var onclose = function onclose() {
var err;
if (readable && !readableEnded) {
if (!stream._readableState || !stream._readableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
if (writable && !writableEnded) {
if (!stream._writableState || !stream._writableState.ended) err = new ERR_STREAM_PREMATURE_CLOSE();
return callback.call(stream, err);
}
};
var onrequest = function onrequest() {
stream.req.on('finish', onfinish);
};
if (isRequest(stream)) {
stream.on('complete', onfinish);
stream.on('abort', onclose);
if (stream.req) onrequest();else stream.on('request', onrequest);
} else if (writable && !stream._writableState) {
// legacy streams
stream.on('end', onlegacyfinish);
stream.on('close', onlegacyfinish);
}
stream.on('end', onend);
stream.on('finish', onfinish);
if (opts.error !== false) stream.on('error', onerror);
stream.on('close', onclose);
return function () {
stream.removeListener('complete', onfinish);
stream.removeListener('abort', onclose);
stream.removeListener('request', onrequest);
if (stream.req) stream.req.removeListener('finish', onfinish);
stream.removeListener('end', onlegacyfinish);
stream.removeListener('close', onlegacyfinish);
stream.removeListener('finish', onfinish);
stream.removeListener('end', onend);
stream.removeListener('error', onerror);
stream.removeListener('close', onclose);
};
}
module.exports = eos;
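
// Hedged usage sketch (the stream variable below is illustrative only):
//
//   eos(someStream, { readable: true, writable: false }, function (err) {
//     if (err) console.error('stream failed', err);
//     else console.log('stream ended cleanly');
//   });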
/Hooke-1.0.0.alpha%20(Ninken).tar.gz/Hooke-1.0.0.alpha (Ninken)/hooke/ui/gui/dialog/points.py
from numpy import arange
import wx
class ClickedPoint(object):
"""Defines a clicked point from a curve plot.
"""
def __init__(self):
self.is_marker=None #boolean ; decides if it is a marker
self.is_line_edge=None #boolean ; decides if it is the edge of a line (unused)
self.absolute_coords=(None,None) #(float,float) ; the absolute coordinates of the clicked point on the graph
self.graph_coords=(None,None) #(float,float) ; the coordinates of the plot that are nearest in X to the clicked point
self.index=None #integer ; the index of the clicked point with respect to the vector selected
self.dest=None #0 or 1 ; 0=top plot 1=bottom plot
def find_graph_coords(self,xvector,yvector):
"""Find the point in the dataset that is closest to `self`.
Given a clicked point on the plot, finds the nearest point in
the dataset (in X) that corresponds to the clicked point.
"""
        dists = []
        # start at index 0 so positions in dists line up with xvector
        # (the original loop started at 1, shifting the found index by one)
        for index in arange(len(xvector)):
            dists.append(((self.absolute_coords[0] - xvector[index]) ** 2) + ((self.absolute_coords[1] - yvector[index]) ** 2))
        self.index = dists.index(min(dists))
        self.graph_coords = (xvector[self.index], yvector[self.index])
def measure_N_points(hooke_frame, N, message='', block=0):
    '''
    General helper function for N-point measurements.
    By default, measurements are done on the retraction.
    '''
    if message:
        dialog = wx.MessageDialog(None, message, 'Info', wx.OK)
        dialog.ShowModal()
    # use the hooke_frame argument (the original body referred to an
    # undefined `self`)
    figure = hooke_frame.GetActiveFigure()
    xvector = hooke_frame.displayed_plot.curves[block].x
    yvector = hooke_frame.displayed_plot.curves[block].y
    clicked_points = figure.ginput(N, timeout=-1, show_clicks=True)
    points = []
    for clicked_point in clicked_points:
        # ClickedPoint is defined in this module
        point = ClickedPoint()
        point.absolute_coords = clicked_point[0], clicked_point[1]
        point.dest = 0
        #TODO: make this optional?
        #so far, the clicked point is taken, not the corresponding data point
        point.find_graph_coords(xvector, yvector)
        point.is_line_edge = True
        point.is_marker = True
        points.append(point)
    return points
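
if __name__ == "__main__":
    # Minimal sketch of ClickedPoint.find_graph_coords on synthetic data;
    # the curve and clicked coordinates below are invented for illustration.
    import numpy as np
    xv = np.linspace(0.0, 1.0, 101)
    yv = xv ** 2
    pt = ClickedPoint()
    pt.absolute_coords = (0.52, 0.25)
    pt.find_graph_coords(xv, yv)
    print(pt.index, pt.graph_coords)   # nearest sample on the curve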
/DeepGMAP-0.2.0.tar.gz/DeepGMAP-0.2.0/deepgmap/network_constructors/conv4.py

import functools
import importlib as il
import math
import sys

import numpy as np
import tensorflow as tf

# importlib is needed for the dynamic import below (the original file used
# `il` without importing it)
_ac = il.import_module("deepgmap.network_constructors.auc_calc")
ac=_ac.auc_pr
#the code design came from https://gist.github.com/danijar/8663d3bbfd586bffecf6a0094cd116f2
def doublewrap(function):
@functools.wraps(function)
def decorator(*args, **kwargs):
if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
return function(args[0])
else:
return lambda wrapee: function(wrapee, *args, **kwargs)
return decorator
@doublewrap
def define_scope(function, scope=None, *args, **kwargs):
"""
A decorator for functions that define TensorFlow operations. The wrapped
function will only be executed once. Subsequent calls to it will directly
return the result so that operations are added to the graph only once.
The operations added by the function live within a tf.variable_scope(). If
this decorator is used with arguments, they will be forwarded to the
variable scope. The scope name defaults to the name of the wrapped
function.
"""
attribute = '_cache_' + function.__name__
name = scope or function.__name__
@property
@functools.wraps(function)
def decorator(self):
if not hasattr(self, attribute):
with tf.variable_scope(name, *args, **kwargs):
setattr(self, attribute, function(self))
return getattr(self, attribute)
return decorator
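
# Hedged usage sketch: @define_scope memoizes a model property so its ops are
# added to the TF1 graph only once, inside a tf.variable_scope named after the
# property. The toy class below is illustrative only:
#
#   class Toy(object):
#       def __init__(self, x):
#           self.x = x
#           self.double          # first access builds the ops
#
#       @define_scope
#       def double(self):
#           return tf.multiply(self.x, 2.0)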
class Model(object):
# parameter lists
initial_variation=0.005 #standard deviation of initial variables in the convolution filters
dimension1=320 #the number of the convolution filters in the 1st layer
dimension2=480
dimension20=480 #the number of the convolution filters in the 2nd layer
dimension21=480
dimension22=480
dimension4=925 #the number of the neurons in each layer of the fully-connected neural network
conv1_filter=9
conv2_filter=9
conv21_filter=7
conv22_filter=8
max_to_keep=2
train_speed=0.0001
def __init__(self, *args, **kwargs):
self.data_length=kwargs["data_length"]
self.image = kwargs["image"]
self.label = kwargs["label"]
self.phase=kwargs["phase"]
self.keep_prob=kwargs["keep_prob"]
self.keep_prob2=kwargs["keep_prob2"]
self.keep_prob3=kwargs["keep_prob3"]
self.start_at=kwargs["start_at"]
self.output_dir=kwargs["output_dir"]
self.max_to_keep=kwargs["max_to_keep"]
self.fc1_param=int(math.ceil((math.ceil((math.ceil((math.ceil((#math.ceil((
self.data_length-self.conv1_filter+1)/2.0)
-self.conv2_filter+1)/2.0)
#-self.conv20_filter+1)/2.0)
-self.conv21_filter+1)/2.0)
-self.conv22_filter+1)/4.0))
self.GPUID=kwargs["GPUID"]
self.prediction
self.optimize
self.error
self.saver
self.cost
#print 'Running deap shark model'
if self.output_dir is not None:
flog=open(str(self.output_dir)+'.log', 'w')
flog.write(str(sys.argv[0])+"\n"
+"the filer number of conv1:"+ str(self.dimension1)+"\n"
+"the filer size of conv1:"+ str(self.conv1_filter)+"\n"
+"the filer number of conv2:"+ str(self.dimension2)+"\n"
+"the filer size of conv2:"+ str(self.conv2_filter)+"\n"
#+"the filer number of conv20:"+ str(self.dimension20)+"\n"
#+"the filer size of conv20:"+ str(self.conv20_filter)+"\n"
+"the filer number of conv21:"+ str(self.dimension21)+"\n"
+"the filer size of conv21:"+ str(self.conv21_filter)+"\n"
+"the filer number of conv22:"+ str(self.dimension22)+"\n"
+"the filer size of conv22:"+ str(self.conv22_filter)+"\n"
+"the number of neurons in the fully-connected layer:"+ str(self.dimension4)+"\n"
+"the standard deviation of initial varialbles:"+ str(self.initial_variation)+"\n"
+"train speed:"+ str(self.train_speed)+"\n"
+"data length:" + str(self.data_length)+"\n")
flog.close()
@define_scope
def prediction(self):
with tf.device('/device:GPU:'+self.GPUID):
x_image = self.image
def weight_variable(shape, variable_name):
initial = tf.truncated_normal(shape, mean=0, stddev=self.initial_variation)
return tf.Variable(initial, name=variable_name)
def bias_variable(shape, variable_name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=variable_name)
def bias_variable_high(shape, variable_name, carry_bias=-0.1):
initial = tf.constant(carry_bias, shape=shape)
return tf.Variable(initial, name=variable_name)
def conv2d_1(x, W):
return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1, 2, 1, 1], padding='VALID')
def conv2d_depth(x, W):
return tf.nn.depthwise_conv2d(x, W, strides=[1, 1, 1, 1], padding='VALID')
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1, 2, 1, 1], strides=[1, 2, 1, 1], padding='SAME')
def max_pool_4x1(x):
return tf.nn.max_pool(x, ksize=[1, 4, 1, 1], strides=[1, 4, 1, 1], padding='SAME')
def max_pool_8x1(x):
return tf.nn.max_pool(x, ksize=[1, 17, 1, 1], strides=[1, 17, 1, 1], padding='SAME')
l2norm_list=[]
W_conv1 = weight_variable([self.conv1_filter, 4, 1, self.dimension1], 'W_conv1')
cond=tf.constant(0.9)
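            # soft max-norm constraint: rescale a weight tensor whenever its
            # squared L2 norm exceeds `cond` (note that in TF1 graph mode the
            # op returned by .assign() must be run for this to take effect)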
wconv1_l2=tf.reduce_sum(tf.square(W_conv1))
l2norm_list.append(wconv1_l2)
W_conv1.assign(tf.cond(wconv1_l2>cond, lambda: tf.multiply(W_conv1, cond/wconv1_l2),lambda: W_conv1 ))
h_conv11=conv2d_1(x_image, W_conv1)
#h_conv12=conv2d_1(x_image, tf.reverse(W_conv1, [0, 1]))
h_conv11_ = tf.nn.dropout(tf.nn.relu(h_conv11), self.keep_prob)
#h_conv12_ = tf.nn.dropout(tf.nn.relu(h_conv12), self.keep_prob)
h_pool1 = max_pool_2x2(h_conv11_)
#h_pool1_rc = max_pool_2x2(h_conv12_)
W_conv2 = weight_variable([self.conv2_filter, 1, self.dimension1, self.dimension2], 'W_conv2')
wconv2_l2=tf.reduce_sum(tf.square(W_conv2))
l2norm_list.append(wconv2_l2)
W_conv2.assign(tf.cond(wconv2_l2>cond, lambda: tf.multiply(W_conv2, cond/wconv2_l2),lambda: W_conv2 ))
h_conv2 = tf.nn.dropout(tf.nn.relu(conv2d_1(h_pool1, W_conv2)), self.keep_prob2)
h_pool2 = max_pool_2x2(h_conv2)
W_conv21 = weight_variable([self.conv21_filter, 1, self.dimension2, self.dimension21], 'W_conv21')
wconv21_l2=tf.reduce_sum(tf.square(W_conv21))
l2norm_list.append(wconv21_l2)
W_conv21.assign(tf.cond(wconv21_l2>cond, lambda: tf.multiply(W_conv21, cond/wconv21_l2),lambda: W_conv21 ))
h_conv21 = tf.nn.dropout(tf.nn.relu(conv2d_1(h_pool2, W_conv21)), self.keep_prob2)
h_pool21 = max_pool_2x2(h_conv21)
W_conv22 = weight_variable([self.conv22_filter, 1, self.dimension21, self.dimension22], 'W_conv22')
wconv22_l2=tf.reduce_sum(tf.square(W_conv22))
l2norm_list.append(wconv22_l2)
W_conv22.assign(tf.cond(wconv22_l2>cond, lambda: tf.multiply(W_conv22, cond/wconv22_l2),lambda: W_conv22 ))
h_conv22 = tf.nn.dropout(tf.nn.relu(conv2d_1(h_pool21, W_conv22)), self.keep_prob2)
h_pool22 = max_pool_4x1(h_conv22)
W_fc1 = weight_variable([1 * self.fc1_param * self.dimension22, self.dimension4], 'W_fc1')
wfc1_l2=tf.reduce_sum(tf.square(W_fc1))
l2norm_list.append(wfc1_l2)
W_fc1.assign(tf.cond(wfc1_l2>cond, lambda: tf.multiply(W_fc1, cond/wfc1_l2),lambda: W_fc1 ))
b_fc1 = bias_variable([self.dimension4], 'b_fc1')
bfc1_2=tf.reduce_sum(tf.square(b_fc1))
l2norm_list.append(bfc1_2)
b_fc1.assign(tf.cond(bfc1_2>cond, lambda: tf.multiply(b_fc1, cond/bfc1_2),lambda: b_fc1 ))
h_pool3_flat = tf.reshape(h_pool22, [-1, 1*self.fc1_param*self.dimension22])
h_fc1 = tf.nn.relu(tf.add(tf.matmul(h_pool3_flat, W_fc1), b_fc1))
h_fc1_drop = tf.nn.dropout(h_fc1, self.keep_prob3)
label_shape=self.label.shape[1]
W_fc4 = weight_variable([self.dimension4, tf.cast(label_shape, tf.int32)], 'W_fc4')
wfc4_l2=tf.reduce_sum(tf.square(W_fc4))
l2norm_list.append(wfc4_l2)
W_fc4.assign(tf.cond(wfc4_l2>cond, lambda: tf.multiply(W_fc4, cond/wfc4_l2),lambda: W_fc4 ))
b_fc4 = bias_variable([label_shape], 'b_fc4')
bfc4_l2=tf.reduce_sum(tf.square(b_fc4))
l2norm_list.append(bfc4_l2)
b_fc4.assign(tf.cond(bfc4_l2>cond, lambda: tf.multiply(b_fc4, cond/bfc4_l2),lambda: b_fc4 ))
y_conv=tf.add(tf.matmul(h_fc1_drop, W_fc4), b_fc4)
variable_dict={"W_conv1": W_conv1,
"W_conv2": W_conv2,
"W_conv21": W_conv21,
"W_conv22": W_conv22,
"W_fc1": W_fc1,
"W_fc4": W_fc4,
"b_fc1": b_fc1,
"b_fc4": b_fc4}
neurons_dict={"h_conv22":h_conv22,
"h_conv21":h_conv21,
"h_conv2":h_conv2,
"h_conv11":h_conv11,
#"h_conv12":h_conv12,
"h_fc1_drop": h_fc1_drop,
"h_pool3_flat":h_pool3_flat,
"h_pool22":h_pool22,
"h_pool21":h_pool21,
"h_pool2":h_pool2,
"h_pool1":h_pool1}
#"h_pool1_rc":h_pool1_rc}
return y_conv,tf.nn.sigmoid(y_conv), variable_dict, neurons_dict, l2norm_list
@define_scope
def saver(self):
return tf.train.Saver(max_to_keep=self.max_to_keep)
@define_scope
def cost(self):
with tf.device('/device:GPU:'+self.GPUID):
nll=tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(targets=self.label, logits=self.prediction[0],pos_weight=1.0))
l2_norm=tf.reduce_sum(self.prediction[4])
l1_norm=tf.reduce_sum(tf.abs(self.prediction[1]))
return tf.add_n([nll,tf.multiply((5*10**-7), l2_norm),tf.multiply((1*10**-8),l1_norm)])
@define_scope
def optimize(self):
with tf.device('/device:GPU:'+self.GPUID):
optimizer = tf.train.AdamOptimizer(self.train_speed)
return optimizer.minimize(self.cost)
@define_scope
def error(self):
with tf.device('/device:GPU:'+self.GPUID):
class_n=self.label.shape[1]
FPR_list=[]
TPR_list=[]
PPV_list=[]
for i in range(class_n):
true=self.label[:,i]
prob=self.prediction[1][:,i]
FPR, TPR, PPV=ac(true,prob,0.5)
FPR_list.append(FPR)
TPR_list.append(TPR)
PPV_list.append(PPV)
            return FPR_list, TPR_list, PPV_list
/MeteorTools-2023.9.0-py3-none-any.whl/meteortools/fileformats/kmlHandlers.py

import xmltodict
from shapely.geometry import Polygon
import csv
import simplekml
import numpy as np
import pandas as pd
import os
def readCameraKML(kmlFilename, return_poly=False):
""" Load a KML file and return either a list of lats and longs, or a Shapely polygon
Arguments:
kmlFilename: [string] full path to the KML file to consume
return_poly: [bool] return a Shapely polygon? Default False
Returns:
if return_poly is false, returns a tuple of (cameraname, lats, longs) where lats and longs are lists of the
latitudes and longitudes in the KML file.
If return_poly is true, returns a tuple of (cameranamem, shapely Polygon)
"""
with open(kmlFilename) as fd:
x = xmltodict.parse(fd.read())
cname = x['kml']['Folder']['name']
coords = x['kml']['Folder']['Placemark']['MultiGeometry']['Polygon']['outerBoundaryIs']['LinearRing']['coordinates']
coords = coords.split('\n')
if return_poly is False:
lats = []
lngs = []
for lin in coords:
s = lin.split(',')
lngs.append(float(s[0]))
lats.append(float(s[1]))
return cname, lats, lngs
else:
ptsarr=[]
for lin in coords:
s = lin.split(',')
ptsarr.append((float(s[0]), float(s[1])))
polyg = Polygon(ptsarr)
return cname, polyg
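
# Hedged usage sketch (the path below is illustrative only):
#
#   name, poly = readCameraKML('/path/to/UK0001.kml', return_poly=True)
#   print(name, poly.area)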
def trackCsvtoKML(trackcsvfile, trackdata=None, saveOutput=True, outdir=None):
"""
Either reads a CSV file containing lat, long, height of an event and
creates a 3d KML file from it or, if trackdata is populated, converts a Pandas dataframe containing
the same data. Output is written to disk unless saveOutput is false.
Arguments:
trackcsvfile: [string] full path to the file to read from
trackdata: [array] pandas dataframe containing the data. Default None
saveOutput: [bool] write the KML file to disk. Default true
outdir: [string] where to save the file. Default same folder as the source file
Returns:
        the simplekml Kml object built from the track
"""
kml=simplekml.Kml()
kml.document.name = trackcsvfile
if trackdata is None:
inputfile = csv.reader(open(trackcsvfile))
for row in inputfile:
#columns are lat, long, height, times
kml.newpoint(name='', coords=[(row[1], row[0], row[2])])
else:
for i,r in trackdata.iterrows():
kml.newpoint(name=f'{r[3]:.5f}', coords=[(r[1], r[0], r[2])], extrude=1, altitudemode='absolute')
if 'csv' in trackcsvfile:
outname = trackcsvfile.replace('.csv','.kml')
else:
outname = f'{trackcsvfile}.kml'
if saveOutput:
if outdir is None:
outdir, _ = os.path.split(trackcsvfile)
os.makedirs(outdir, exist_ok=True)
outname = os.path.join(outdir, outname)
kml.save(outname)
return kml
def trackKMLtoCsv(kmlfile, kmldata = None, saveOutput=True, outdir=None):
""" convert a track KML retrieved by ukmondb.trajectoryKML to a 3 dimensional CSV file
containing coordinates of points on the trajectory.
Arguments:
kmlfile [string] full path to the KML file to read from.
kmldata [kml] the kml data if available.
saveOutput [bool] Default True, save the output to file.
outdir [string] where to save to if saveOutput is True.
Note: if kmldata is supplied, then kmlfile is ignored.
Returns:
a Pandas dataframe containing the lat, long, alt and time of each
point on the trajectory, sorted by time.
"""
    if kmldata is not None:
        # honour the documented behaviour: if kmldata is supplied, kmlfile is
        # ignored (hedged: kmldata is assumed here to be a KML document string;
        # the original code never used this parameter)
        x = xmltodict.parse(kmldata)
    else:
        with open(kmlfile) as fd:
            x = xmltodict.parse(fd.read())
    placemarks = x['kml']['Document']['Placemark']
lats = []
lons = []
alts = []
tims = []
for pm in placemarks:
tims.append(float(pm['name']))
coords = pm['Point']['coordinates'].split(',')
lons.append(float(coords[0]))
lats.append(float(coords[1]))
alts.append(float(coords[2]))
df = pd.DataFrame({"lats": lats, "lons": lons, "alts": alts, "times": tims})
df = df.sort_values(by=['times', 'lats'])
if saveOutput:
fname = kmlfile
if outdir is None:
outdir, fname = os.path.split(kmlfile)
outf = os.path.join(outdir, fname).replace('.kml', '.csv').replace('.KML','.csv')
df.to_csv(outf, index=False)
return df
def getTrackDetails(traj):
""" Get track details from a WMPL trajectory object
Arguments:
traj: a WMPL trajectory object containing observations
Returns:
a Pandas dataframe containing the lat, long, alt and time of each point on the trajectory, sorted by time.
"""
lats = []
lons = []
alts = []
lens = []
# Go through observation from all stations
for obs in traj.observations:
# Go through all observed points
for i in range(obs.kmeas):
lats.append(np.degrees(obs.model_lat[i]))
lons.append(np.degrees(obs.model_lon[i]))
alts.append(obs.model_ht[i])
lens.append(obs.time_data[i])
df = pd.DataFrame({"lats": lats, "lons": lons, "alts": alts, "times": lens})
df = df.sort_values(by=['times', 'lats'])
    return df
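
if __name__ == "__main__":
    # Hedged sketch: build a tiny in-memory track and convert it with
    # trackCsvtoKML without writing to disk. All values are invented.
    demo = pd.DataFrame({
        'lats': [51.50, 51.52], 'lons': [-0.12, -0.10],
        'alts': [85000.0, 80000.0], 'times': [0.0, 0.5]})
    kml = trackCsvtoKML('demo_track', trackdata=demo, saveOutput=False)
    print(kml.kml()[:120])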
/Flask_AppBuilder-4.3.6-py3-none-any.whl/flask_appbuilder/models/mixins.py

from datetime import datetime
import logging
from flask import g
from sqlalchemy import Column, DateTime, ForeignKey, Integer
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import relationship
import sqlalchemy.types as types
log = logging.getLogger(__name__)
class FileColumn(types.TypeDecorator):
"""
Extends SQLAlchemy to support and mostly identify a File Column
"""
impl = types.Text
class ImageColumn(types.TypeDecorator):
"""
Extends SQLAlchemy to support and mostly identify an Image Column
"""
impl = types.Text
def __init__(self, thumbnail_size=(20, 20, True), size=(100, 100, True), **kw):
types.TypeDecorator.__init__(self, **kw)
self.thumbnail_size = thumbnail_size
self.size = size
class AuditMixin(object):
"""
AuditMixin
Mixin for models, adds 4 columns to stamp,
time and user on creation and modification
will create the following columns:
:created on:
:changed on:
:created by:
:changed by:
"""
created_on = Column(DateTime, default=lambda: datetime.now(), nullable=False)
changed_on = Column(
DateTime,
default=lambda: datetime.now(),
onupdate=lambda: datetime.now(),
nullable=False,
)
@declared_attr
def created_by_fk(cls):
return Column(
Integer, ForeignKey("ab_user.id"), default=cls.get_user_id, nullable=False
)
@declared_attr
def created_by(cls):
return relationship(
"User",
primaryjoin="%s.created_by_fk == User.id" % cls.__name__,
enable_typechecks=False,
)
@declared_attr
def changed_by_fk(cls):
return Column(
Integer,
ForeignKey("ab_user.id"),
default=cls.get_user_id,
onupdate=cls.get_user_id,
nullable=False,
)
@declared_attr
def changed_by(cls):
return relationship(
"User",
primaryjoin="%s.changed_by_fk == User.id" % cls.__name__,
enable_typechecks=False,
)
@classmethod
def get_user_id(cls):
try:
return g.user.id
except Exception:
return None
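# Usage sketch (assumption: a Flask-AppBuilder app whose metadata includes the ab_user table):
#   class Project(AuditMixin, Model):
#       id = Column(Integer, primary_key=True)
#       name = Column(String(150), nullable=False)
# created_on/changed_on and created_by/changed_by are then stamped automatically
# from flask.g.user on insert and update.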
class UserExtensionMixin(object):
__tablename__ = "ab_user_extended"
__mapper_args__ = {"polymorphic_identity": "ab_user_extended"}
@declared_attr
def id(cls):
return Column(None, ForeignKey("ab_user.id"), primary_key=True)
"""
This is kept for backwards compatibility
"""
class BaseMixin(object):
pass | PypiClean |
/BaseCon-1.0.2.tar.gz/BaseCon-1.0.2/basecon.py |
# import directive
__all__ = ('__author__', '__license__', '__version__', 'BaseCon')
# package metadata
__author__ = 'Hansheng Zhao'
__license__ = 'BSD-2-Clause + MIT'
__version__ = '1.0.2'
class BaseCon(object):
""" BaseCon class for converting integers to URL safe strings """
# stop dynamic attribute creation
__slots__ = ('_base', )
# Base conversion charsets and indices
_charset = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz-._'
_reverse = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'A': 10, 'B': 11, 'C': 12,
'D': 13, 'E': 14, 'F': 15, 'G': 16, 'H': 17, 'I': 18, 'J': 19, 'K': 20, 'L': 21, 'M': 22, 'N': 23, 'O': 24, 'P': 25,
'Q': 26, 'R': 27, 'S': 28, 'T': 29, 'U': 30, 'V': 31, 'W': 32, 'X': 33, 'Y': 34, 'Z': 35, 'a': 36, 'b': 37, 'c': 38,
'd': 39, 'e': 40, 'f': 41, 'g': 42, 'h': 43, 'i': 44, 'j': 45, 'k': 46, 'l': 47, 'm': 48, 'n': 49, 'o': 50, 'p': 51,
'q': 52, 'r': 53, 's': 54, 't': 55, 'u': 56, 'v': 57, 'w': 58, 'x': 59, 'y': 60, 'z': 61, '-': 62, '.': 63, '_': 64
}
def __init__(self, base = 62):
""" BaseConvert class for converting integers """
if not (isinstance(base, int) and 2 <= base <= 65):
raise ValueError('Base should be between 2 and 65.')
# preserve base
self._base = base
def __call__(self, data, switch = True):
"""
Alias for encode/decode methods
        :param data: int|str, non-negative integer to encode, or encoded string to decode
        :param switch: bool, True to encode, False to decode
        :return: str|int, result
"""
return self.encode(data) \
if switch else self.decode(data)
def encode(self, data):
"""
Encode integers into base encoded strings.
:param data: int, the integers to be encoded
:return: str, base encoded string
"""
# check if data payload and base are valid
        if not isinstance(data, int) or data < 0:
raise TypeError('Accepts only non-negative int.')
# acquire base
base = self._base
# check if base have known functions
if base == 2: return bin(data)[2:]
elif base == 8: return oct(data)[2:]
elif base == 10: return str(data)
elif base == 16: return hex(data)[2:]
# prepend encoded string to the result
result = ''
while data:
remainder = data % base
data = (data - remainder) // base
result = self._charset[remainder] + result
        # data == 0 never enters the loop, so fall back to the zero digit
        return result or self._charset[0]
convert = encode
def decode(self, data):
"""
Decode base encoded strings back into integers.
:param data: str, base encoded string
:return: int, the original number
"""
# check if data payload and base are valid
if isinstance(data, (bytes, bytearray)):
data = data.decode(encoding = 'ASCII')
elif not isinstance(data, str):
raise TypeError('Accepts only str, bytes and bytearray.')
# acquire base
base = self._base
# check if base can be used with int function
if 2 <= base <= 36: return int(data, base)
# iteratively decode the string
result = 0
for item in data:
result = result * base + self._reverse[item]
return result
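    # Round-trip sketch: BaseCon(62).encode(123456) == 'W7E'
    # and BaseCon(62).decode('W7E') == 123456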
# aliases for decode
revert = decode | PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/chokidar/lib/fsevents-handler.js | 'use strict';
const fs = require('fs');
const sysPath = require('path');
const { promisify } = require('util');
let fsevents;
try {
fsevents = require('fsevents');
} catch (error) {
if (process.env.CHOKIDAR_PRINT_FSEVENTS_REQUIRE_ERROR) console.error(error);
}
if (fsevents) {
// TODO: real check
const mtch = process.version.match(/v(\d+)\.(\d+)/);
if (mtch && mtch[1] && mtch[2]) {
const maj = Number.parseInt(mtch[1], 10);
const min = Number.parseInt(mtch[2], 10);
if (maj === 8 && min < 16) {
fsevents = undefined;
}
}
}
const {
EV_ADD,
EV_CHANGE,
EV_ADD_DIR,
EV_UNLINK,
EV_ERROR,
STR_DATA,
STR_END,
FSEVENT_CREATED,
FSEVENT_MODIFIED,
FSEVENT_DELETED,
FSEVENT_MOVED,
// FSEVENT_CLONED,
FSEVENT_UNKNOWN,
FSEVENT_TYPE_FILE,
FSEVENT_TYPE_DIRECTORY,
FSEVENT_TYPE_SYMLINK,
ROOT_GLOBSTAR,
DIR_SUFFIX,
DOT_SLASH,
FUNCTION_TYPE,
EMPTY_FN,
IDENTITY_FN
} = require('./constants');
const Depth = (value) => isNaN(value) ? {} : {depth: value};
const stat = promisify(fs.stat);
const lstat = promisify(fs.lstat);
const realpath = promisify(fs.realpath);
const statMethods = { stat, lstat };
/**
* @typedef {String} Path
*/
/**
* @typedef {Object} FsEventsWatchContainer
* @property {Set<Function>} listeners
* @property {Function} rawEmitter
* @property {{stop: Function}} watcher
*/
// fsevents instance helper functions
/**
* Object to hold per-process fsevents instances (may be shared across chokidar FSWatcher instances)
* @type {Map<Path,FsEventsWatchContainer>}
*/
const FSEventsWatchers = new Map();
// Threshold of duplicate path prefixes at which to start
// consolidating going forward
const consolidateThreshhold = 10;
const wrongEventFlags = new Set([
69888, 70400, 71424, 72704, 73472, 131328, 131840, 262912
]);
/**
* Instantiates the fsevents interface
* @param {Path} path path to be watched
* @param {Function} callback called when fsevents is bound and ready
* @returns {{stop: Function}} new fsevents instance
*/
const createFSEventsInstance = (path, callback) => {
const stop = fsevents.watch(path, callback);
return {stop};
};
/**
* Instantiates the fsevents interface or binds listeners to an existing one covering
* the same file tree.
* @param {Path} path - to be watched
* @param {Path} realPath - real path for symlinks
* @param {Function} listener - called when fsevents emits events
* @param {Function} rawEmitter - passes data to listeners of the 'raw' event
* @returns {Function} closer
*/
function setFSEventsListener(path, realPath, listener, rawEmitter) {
let watchPath = sysPath.extname(realPath) ? sysPath.dirname(realPath) : realPath;
const parentPath = sysPath.dirname(watchPath);
let cont = FSEventsWatchers.get(watchPath);
// If we've accumulated a substantial number of paths that
// could have been consolidated by watching one directory
// above the current one, create a watcher on the parent
// path instead, so that we do consolidate going forward.
if (couldConsolidate(parentPath)) {
watchPath = parentPath;
}
const resolvedPath = sysPath.resolve(path);
const hasSymlink = resolvedPath !== realPath;
const filteredListener = (fullPath, flags, info) => {
if (hasSymlink) fullPath = fullPath.replace(realPath, resolvedPath);
if (
fullPath === resolvedPath ||
!fullPath.indexOf(resolvedPath + sysPath.sep)
) listener(fullPath, flags, info);
};
// check if there is already a watcher on a parent path
// modifies `watchPath` to the parent path when it finds a match
let watchedParent = false;
for (const watchedPath of FSEventsWatchers.keys()) {
if (realPath.indexOf(sysPath.resolve(watchedPath) + sysPath.sep) === 0) {
watchPath = watchedPath;
cont = FSEventsWatchers.get(watchPath);
watchedParent = true;
break;
}
}
if (cont || watchedParent) {
cont.listeners.add(filteredListener);
} else {
cont = {
listeners: new Set([filteredListener]),
rawEmitter,
watcher: createFSEventsInstance(watchPath, (fullPath, flags) => {
if (!cont.listeners.size) return;
const info = fsevents.getInfo(fullPath, flags);
cont.listeners.forEach(list => {
list(fullPath, flags, info);
});
cont.rawEmitter(info.event, fullPath, info);
})
};
FSEventsWatchers.set(watchPath, cont);
}
// removes this instance's listeners and closes the underlying fsevents
// instance if there are no more listeners left
return () => {
const lst = cont.listeners;
lst.delete(filteredListener);
if (!lst.size) {
FSEventsWatchers.delete(watchPath);
if (cont.watcher) return cont.watcher.stop().then(() => {
cont.rawEmitter = cont.watcher = undefined;
Object.freeze(cont);
});
}
};
}
// Decide whether or not we should start a new higher-level
// parent watcher
const couldConsolidate = (path) => {
let count = 0;
for (const watchPath of FSEventsWatchers.keys()) {
if (watchPath.indexOf(path) === 0) {
count++;
if (count >= consolidateThreshhold) {
return true;
}
}
}
return false;
};
// returns boolean indicating whether fsevents can be used
const canUse = () => fsevents && FSEventsWatchers.size < 128;
// determines subdirectory traversal levels from root to path
const calcDepth = (path, root) => {
let i = 0;
while (!path.indexOf(root) && (path = sysPath.dirname(path)) !== root) i++;
return i;
};
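// Example: calcDepth('/a/b/c/d', '/a') === 2 (two dirname() steps are taken
// before the walk reaches the root)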
// returns boolean indicating whether the fsevents' event info has the same type
// as the one returned by fs.stat
const sameTypes = (info, stats) => (
info.type === FSEVENT_TYPE_DIRECTORY && stats.isDirectory() ||
info.type === FSEVENT_TYPE_SYMLINK && stats.isSymbolicLink() ||
info.type === FSEVENT_TYPE_FILE && stats.isFile()
)
/**
* @mixin
*/
class FsEventsHandler {
/**
* @param {import('../index').FSWatcher} fsw
*/
constructor(fsw) {
this.fsw = fsw;
}
checkIgnored(path, stats) {
const ipaths = this.fsw._ignoredPaths;
if (this.fsw._isIgnored(path, stats)) {
ipaths.add(path);
if (stats && stats.isDirectory()) {
ipaths.add(path + ROOT_GLOBSTAR);
}
return true;
}
ipaths.delete(path);
ipaths.delete(path + ROOT_GLOBSTAR);
}
addOrChange(path, fullPath, realPath, parent, watchedDir, item, info, opts) {
const event = watchedDir.has(item) ? EV_CHANGE : EV_ADD;
this.handleEvent(event, path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
async checkExists(path, fullPath, realPath, parent, watchedDir, item, info, opts) {
try {
const stats = await stat(path)
if (this.fsw.closed) return;
if (sameTypes(info, stats)) {
this.addOrChange(path, fullPath, realPath, parent, watchedDir, item, info, opts);
} else {
this.handleEvent(EV_UNLINK, path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
} catch (error) {
if (error.code === 'EACCES') {
this.addOrChange(path, fullPath, realPath, parent, watchedDir, item, info, opts);
} else {
this.handleEvent(EV_UNLINK, path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
}
}
handleEvent(event, path, fullPath, realPath, parent, watchedDir, item, info, opts) {
if (this.fsw.closed || this.checkIgnored(path)) return;
if (event === EV_UNLINK) {
const isDirectory = info.type === FSEVENT_TYPE_DIRECTORY
// suppress unlink events on never before seen files
if (isDirectory || watchedDir.has(item)) {
this.fsw._remove(parent, item, isDirectory);
}
} else {
if (event === EV_ADD) {
// track new directories
if (info.type === FSEVENT_TYPE_DIRECTORY) this.fsw._getWatchedDir(path);
if (info.type === FSEVENT_TYPE_SYMLINK && opts.followSymlinks) {
// push symlinks back to the top of the stack to get handled
const curDepth = opts.depth === undefined ?
undefined : calcDepth(fullPath, realPath) + 1;
return this._addToFsEvents(path, false, true, curDepth);
}
// track new paths
// (other than symlinks being followed, which will be tracked soon)
this.fsw._getWatchedDir(parent).add(item);
}
/**
* @type {'add'|'addDir'|'unlink'|'unlinkDir'}
*/
const eventName = info.type === FSEVENT_TYPE_DIRECTORY ? event + DIR_SUFFIX : event;
this.fsw._emit(eventName, path);
if (eventName === EV_ADD_DIR) this._addToFsEvents(path, false, true);
}
}
/**
* Handle symlinks encountered during directory scan
* @param {String} watchPath - file/dir path to be watched with fsevents
* @param {String} realPath - real path (in case of symlinks)
* @param {Function} transform - path transformer
* @param {Function} globFilter - path filter in case a glob pattern was provided
* @returns {Function} closer for the watcher instance
*/
_watchWithFsEvents(watchPath, realPath, transform, globFilter) {
if (this.fsw.closed || this.fsw._isIgnored(watchPath)) return;
const opts = this.fsw.options;
const watchCallback = async (fullPath, flags, info) => {
if (this.fsw.closed) return;
if (
opts.depth !== undefined &&
calcDepth(fullPath, realPath) > opts.depth
) return;
const path = transform(sysPath.join(
watchPath, sysPath.relative(watchPath, fullPath)
));
if (globFilter && !globFilter(path)) return;
// ensure directories are tracked
const parent = sysPath.dirname(path);
const item = sysPath.basename(path);
const watchedDir = this.fsw._getWatchedDir(
info.type === FSEVENT_TYPE_DIRECTORY ? path : parent
);
// correct for wrong events emitted
if (wrongEventFlags.has(flags) || info.event === FSEVENT_UNKNOWN) {
if (typeof opts.ignored === FUNCTION_TYPE) {
let stats;
try {
stats = await stat(path);
} catch (error) {}
if (this.fsw.closed) return;
if (this.checkIgnored(path, stats)) return;
          if (stats && sameTypes(info, stats)) {
this.addOrChange(path, fullPath, realPath, parent, watchedDir, item, info, opts);
} else {
this.handleEvent(EV_UNLINK, path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
} else {
this.checkExists(path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
} else {
switch (info.event) {
case FSEVENT_CREATED:
case FSEVENT_MODIFIED:
return this.addOrChange(path, fullPath, realPath, parent, watchedDir, item, info, opts);
case FSEVENT_DELETED:
case FSEVENT_MOVED:
return this.checkExists(path, fullPath, realPath, parent, watchedDir, item, info, opts);
}
}
};
const closer = setFSEventsListener(
watchPath,
realPath,
watchCallback,
this.fsw._emitRaw
);
this.fsw._emitReady();
return closer;
}
/**
* Handle symlinks encountered during directory scan
* @param {String} linkPath path to symlink
* @param {String} fullPath absolute path to the symlink
* @param {Function} transform pre-existing path transformer
* @param {Number} curDepth level of subdirectories traversed to where symlink is
* @returns {Promise<void>}
*/
async _handleFsEventsSymlink(linkPath, fullPath, transform, curDepth) {
// don't follow the same symlink more than once
if (this.fsw.closed || this.fsw._symlinkPaths.has(fullPath)) return;
this.fsw._symlinkPaths.set(fullPath, true);
this.fsw._incrReadyCount();
try {
const linkTarget = await realpath(linkPath);
if (this.fsw.closed) return;
if (this.fsw._isIgnored(linkTarget)) {
return this.fsw._emitReady();
}
this.fsw._incrReadyCount();
// add the linkTarget for watching with a wrapper for transform
// that causes emitted paths to incorporate the link's path
this._addToFsEvents(linkTarget || linkPath, (path) => {
let aliasedPath = linkPath;
if (linkTarget && linkTarget !== DOT_SLASH) {
aliasedPath = path.replace(linkTarget, linkPath);
} else if (path !== DOT_SLASH) {
aliasedPath = sysPath.join(linkPath, path);
}
return transform(aliasedPath);
}, false, curDepth);
} catch(error) {
if (this.fsw._handleError(error)) {
return this.fsw._emitReady();
}
}
}
/**
*
* @param {Path} newPath
* @param {fs.Stats} stats
*/
emitAdd(newPath, stats, processPath, opts, forceAdd) {
const pp = processPath(newPath);
const isDir = stats.isDirectory();
const dirObj = this.fsw._getWatchedDir(sysPath.dirname(pp));
const base = sysPath.basename(pp);
// ensure empty dirs get tracked
if (isDir) this.fsw._getWatchedDir(pp);
if (dirObj.has(base)) return;
dirObj.add(base);
if (!opts.ignoreInitial || forceAdd === true) {
this.fsw._emit(isDir ? EV_ADD_DIR : EV_ADD, pp, stats);
}
}
initWatch(realPath, path, wh, processPath) {
if (this.fsw.closed) return;
const closer = this._watchWithFsEvents(
wh.watchPath,
sysPath.resolve(realPath || wh.watchPath),
processPath,
wh.globFilter
);
this.fsw._addPathCloser(path, closer);
}
/**
* Handle added path with fsevents
* @param {String} path file/dir path or glob pattern
* @param {Function|Boolean=} transform converts working path to what the user expects
* @param {Boolean=} forceAdd ensure add is emitted
* @param {Number=} priorDepth Level of subdirectories already traversed.
* @returns {Promise<void>}
*/
async _addToFsEvents(path, transform, forceAdd, priorDepth) {
if (this.fsw.closed) {
return;
}
const opts = this.fsw.options;
const processPath = typeof transform === FUNCTION_TYPE ? transform : IDENTITY_FN;
const wh = this.fsw._getWatchHelpers(path);
// evaluate what is at the path we're being asked to watch
try {
const stats = await statMethods[wh.statMethod](wh.watchPath);
if (this.fsw.closed) return;
if (this.fsw._isIgnored(wh.watchPath, stats)) {
throw null;
}
if (stats.isDirectory()) {
// emit addDir unless this is a glob parent
if (!wh.globFilter) this.emitAdd(processPath(path), stats, processPath, opts, forceAdd);
// don't recurse further if it would exceed depth setting
if (priorDepth && priorDepth > opts.depth) return;
// scan the contents of the dir
this.fsw._readdirp(wh.watchPath, {
fileFilter: entry => wh.filterPath(entry),
directoryFilter: entry => wh.filterDir(entry),
...Depth(opts.depth - (priorDepth || 0))
}).on(STR_DATA, (entry) => {
// need to check filterPath on dirs b/c filterDir is less restrictive
if (this.fsw.closed) {
return;
}
if (entry.stats.isDirectory() && !wh.filterPath(entry)) return;
const joinedPath = sysPath.join(wh.watchPath, entry.path);
const {fullPath} = entry;
if (wh.followSymlinks && entry.stats.isSymbolicLink()) {
// preserve the current depth here since it can't be derived from
// real paths past the symlink
const curDepth = opts.depth === undefined ?
undefined : calcDepth(joinedPath, sysPath.resolve(wh.watchPath)) + 1;
this._handleFsEventsSymlink(joinedPath, fullPath, processPath, curDepth);
} else {
this.emitAdd(joinedPath, entry.stats, processPath, opts, forceAdd);
}
}).on(EV_ERROR, EMPTY_FN).on(STR_END, () => {
this.fsw._emitReady();
});
} else {
this.emitAdd(wh.watchPath, stats, processPath, opts, forceAdd);
this.fsw._emitReady();
}
} catch (error) {
if (!error || this.fsw._handleError(error)) {
// TODO: Strange thing: "should not choke on an ignored watch path" will be failed without 2 ready calls -__-
this.fsw._emitReady();
this.fsw._emitReady();
}
}
if (opts.persistent && forceAdd !== true) {
if (typeof transform === FUNCTION_TYPE) {
// realpath has already been resolved
this.initWatch(undefined, path, wh, processPath);
} else {
let realPath;
try {
realPath = await realpath(wh.watchPath);
} catch (e) {}
this.initWatch(realPath, path, wh, processPath);
}
}
}
}
module.exports = FsEventsHandler;
module.exports.canUse = canUse; | PypiClean |
/AsyncIRCClient-0.0.8-py3-none-any.whl/async_irc_client/async_irc_client.py | import asyncio
from asyncio import transports
from dataclasses import dataclass, field
from typing import Union, Callable, Any, Coroutine
import datetime
from loguru import logger
def is_event_loop_set() -> bool:
"""
check if asyncio has loop set
:return: bool
"""
try:
# check if any event loop is set
asyncio.get_event_loop()
return True
except RuntimeError as _error:
return False
def get_time_difference(time_str: str) -> float:
"""
get time difference
:param time_str:
:return:
"""
now = datetime.datetime.now().time()
# Parse the target time from the input string
target_time = datetime.datetime.strptime(time_str, '%H:%M').time()
# Calculate the time difference between now and the target time
time_diff = datetime.datetime.combine(datetime.date.today(), target_time) - datetime.datetime.combine(
datetime.date.today(), now)
# If the target time has already passed today, calculate the time difference for tomorrow
if time_diff.total_seconds() < 0:
tomorrow = datetime.date.today() + datetime.timedelta(days=1)
time_diff = datetime.datetime.combine(tomorrow, target_time) - datetime.datetime.combine(
datetime.date.today(), now)
return time_diff.total_seconds()
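# Example: at 10:30, get_time_difference('12:00') is roughly 5400.0 seconds;
# a target time that already passed today resolves to the same time tomorrow.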
@dataclass
class CommandCallback(object):
name: str
callback: Callable
mod_only: bool = False
aliases: list[str] = field(default_factory=lambda: [])
case_sensitive: bool = True
@dataclass
class Command(object):
command: Union[None, str] = None
channel: Union[None, str] = None
channel_raw: Union[None, str] = None
bot_command: Union[None, str] = None
bot_command_params: Union[None, str] = None
    is_cap_request_enabled: Union[None, bool] = None
@dataclass
class Source(object):
nick: Union[None, str] = None
host: Union[None, str] = None
@dataclass
class Message(object):
tags: Union[None, dict[str, Union[None, dict, list[str]]]] = None
source: Union[None, Source] = None
command: Union[None, Command] = None
parameters: Union[None, str] = None
def parse_command(cmp) -> Union[Command, None]:
"""
parse command component
:param cmp: cmp
:return: str
"""
parsed_command: Command = Command()
command_parts = cmp.split(' ')
match (command_parts[0]):
case "JOIN" | "PART" | "NOTICE" | "CLEARCHAT" | "HOSTTARGET" | "PRIVMSG" | "USERSTATE" | "ROOMSTATE" | "PONG" | "USERNOTICE":
parsed_command.command = command_parts[0]
parsed_command.channel_raw = command_parts[1]
parsed_command.channel = command_parts[1][1:]
case "CAP":
parsed_command.command = command_parts[0]
parsed_command.is_cap_request_enabled = command_parts[2] == "ACK"
case "GLOBALUSERSTATE" | "PING" | "RECONNECT":
parsed_command.command = command_parts[0]
case "431":
return logger.warning(f"Unsupported IRC command: {command_parts[2]}")
case "001" | "002" | "003" | "004" | "353" | "366" | "372" | "375" | "376" | "433" | "474":
parsed_command.command = command_parts[0]
case _:
return logger.error(f"\n\nUnexpected Command: {command_parts[0]} ({command_parts})\n")
return parsed_command
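# Example: parse_command('PRIVMSG #somechannel') yields
# Command(command='PRIVMSG', channel='somechannel', channel_raw='#somechannel')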
def parse_tags(cmp: str) -> dict[str, Union[None, dict, list[str]]]:
"""
parse tags
:param cmp: str
    :return: dict mapping tag names to their parsed values
"""
tags_to_ignore: dict[str, Union[None, str]] = {
"client-nonce": None,
"flags": None
}
tags: dict[str, Union[None, dict, list[str]]] = {}
split_tags: list[str] = cmp.split(';')
for tag in split_tags:
key_value: list[str] = tag.split('=')
tag_value: Union[None, str] = None if key_value[1] == '' else key_value[1]
match key_value[0]:
case "badges" | "badge-info":
if tag_value is not None:
data: dict[str, str] = {}
badges: list[str] = tag_value.split(',')
for pair in badges:
badge_parts: list[str] = pair.split('/')
data[badge_parts[0]] = badge_parts[1]
tags[key_value[0]] = data
else:
tags[key_value[0]] = None
case "emotes":
if tag_value is not None:
data: dict = {}
emotes = tag_value.split('/')
for emote in emotes:
emote_parts = emote.split(':')
text_positions = []
positions = emote_parts[1].split(',')
for pos in positions:
pos_parts = pos.split('-')
text_positions.append(
{
"start_position": pos_parts[0],
"end_position": pos_parts[1]
}
)
data[emote_parts[0]] = text_positions
tags[key_value[0]] = data
else:
tags[key_value[0]] = None
case "emote-sets":
emote_set_ids = tag_value.split(',')
tags[key_value[0]] = emote_set_ids
case _:
if key_value[0] not in tags_to_ignore:
tags[key_value[0]] = tag_value
return tags
def parse_source(cmp: str) -> Union[None, Source]:
"""
parses source
:param cmp: source string
:return: Source
"""
if cmp is None:
return None
source_parts: list[str] = cmp.split('!')
return Source(
source_parts[0] if len(source_parts) == 2 else None,
source_parts[1] if len(source_parts) == 2 else source_parts[0]
)
def parse_parameters(cmp: str, command: Command) -> Command:
"""
parse parameters
:param cmp: parameter string
:param command: command object
:return: Command
"""
pointer: int = 0
    command_parts = cmp[pointer + 1:].strip()
param_pointer = command_parts.find(' ')
if param_pointer == -1:
command.bot_command = command_parts[:]
else:
command.bot_command = command_parts[:param_pointer]
command.bot_command_params = command_parts[param_pointer:].strip()
return command
def parse_message(message_string: str) -> Union[None, Message]:
"""
parse message and return Message object
:param message_string: message string
:return: Message
"""
message = Message()
raw_tags_component: Union[None, str] = None
raw_source_component = None
_raw_command_component = None
raw_parameter_component = None
pointer: int = 0
end_pointer: int
if message_string[pointer] == '@':
end_pointer = message_string.find(' ')
raw_tags_component = message_string[1: end_pointer]
pointer = end_pointer + 1
if message_string[pointer] == ':':
pointer += 1
end_pointer = message_string.find(' ', pointer)
raw_source_component = message_string[pointer: end_pointer]
pointer = end_pointer + 1
# command part
end_pointer = message_string.find(':', pointer)
if end_pointer == -1:
end_pointer = len(message_string)
raw_command_component = message_string[pointer: end_pointer].strip()
# parameters
if end_pointer < len(message_string):
pointer = end_pointer + 1
raw_parameter_component = message_string[pointer:]
# parse command
message.command = parse_command(raw_command_component)
# discard if command is none
if message.command is None:
return None
if raw_tags_component is not None:
message.tags = parse_tags(raw_tags_component)
message.source = parse_source(raw_source_component)
message.parameters = raw_parameter_component
if raw_parameter_component and raw_parameter_component[0] == '!':
message.command = parse_parameters(raw_parameter_component, message.command)
return message
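# Example (abridged Twitch IRC line):
#   msg = parse_message(':foo!foo@foo.tmi.twitch.tv PRIVMSG #bar :!dice roll')
#   msg.command.command == 'PRIVMSG', msg.source.nick == 'foo',
#   msg.parameters == '!dice roll', msg.command.bot_command == 'dice'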
class Timer(object):
def __init__(self, timeout: int, callback: Callable):
"""
async timer
:param timeout: timeout
:param callback: callback
"""
self._timeout: int = timeout
self._callback: Callable = callback
self._task: Union[asyncio.Task, None] = None
async def _wait(self) -> None:
"""
wait until timeout has expired
:return: None
"""
await asyncio.sleep(self._timeout)
await self._callback()
def start(self) -> None:
"""
start timer
:return: None
"""
loop: asyncio.AbstractEventLoop = asyncio.get_event_loop()
self._task = loop.create_task(self._wait())
def cancel(self) -> None:
"""
cancel timer
:return: None
"""
if self._task is None:
return logger.warning("Timer has not been started yet.")
self._task.cancel()
def restart(self) -> None:
"""
restart timer
:return: None
"""
self.cancel()
self.start()
def set_timeout(self, timeout: int) -> None:
"""
set timer timeout.
:param timeout: timeout
:return: None
"""
self._timeout = timeout
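# Usage sketch: fire an async callback once, 5 seconds from now, inside a running loop:
#   timer = Timer(5, some_async_callable)  # some_async_callable is hypothetical
#   timer.start()  # cancel() / restart() are available while the loop runs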
class AsyncIRCClientProtocol(asyncio.Protocol):
def __init__(self, on_connection_made: Union[Callable, None] = None,
on_connection_lost: Union[Callable, None] = None, on_data_received: Union[Callable, None] = None):
"""
protocol constructor
:param on_connection_made: on connection established callback
:param on_connection_lost: on connection lost callback
:param on_data_received: on data received callback
"""
self._transport: Union[transports.Transport, None] = None
self._on_connection_made_callback: Union[Callable, None] = on_connection_made
self._on_connection_lost_callback: Union[Callable, None] = on_connection_lost
self._on_data_received_callback: Union[Callable, None] = on_data_received
super().__init__()
def connection_made(self, transport: transports.Transport) -> None:
"""
Upon connection has been established
:param transport: base transport
:return: None
"""
self._transport = transport
if self._on_connection_made_callback is not None:
self._on_connection_made_callback(transport)
def connection_lost(self, exception: Union[Exception, None]) -> None:
"""
        connection to the server has been lost
:param exception: exception
:return: None
"""
self._transport.close()
        if self._on_connection_lost_callback is not None:
self._on_connection_lost_callback(exception)
def data_received(self, data: bytes) -> None:
"""
called upon new data received from server
:param data: data as bytes
:return: None
"""
if self._on_data_received_callback is not None:
decoded_data: str = data.decode()
message: str
for message in decoded_data.split("\r\n"):
if not message:
continue
self._on_data_received_callback(message)
def send_irc_data(self, data: str, log: bool = True) -> None:
"""
send raw irc data as string
:param data: data to send
:param log: log sending
:return: None
"""
if log:
logger.debug(f"Sending {data}")
try:
self._transport.write(bytes(data + "\n\r", "utf-8"))
except Exception as error:
logger.exception(error)
class IRCClientInterfaceMixin(object):
def on_connected_to_server(self) -> None:
"""
called as soon as the client connected to the server
:return: None
"""
def on_disconnected_from_server(self, exception: Exception) -> None:
"""
called as soon as the client disconnects from the server
:param exception: exception that caused the disconnect
:return: None
"""
class IRCClient(IRCClientInterfaceMixin):
def __init__(self, server: str, port: int, loop: Union[asyncio.AbstractEventLoop, None] = None):
"""
constructor
:param server: server to connect to
:param port: port to connect to
:param loop: set custom event loop
"""
self._server: str = server
self._port: int = port
self._loop: Union[asyncio.AbstractEventLoop, None] = loop
self._protocol: Union[AsyncIRCClientProtocol, None] = None
self._transport: Union[transports.Transport, None] = None
self._event_handler: dict[str, Callable] = {}
self._is_connected: bool = False
# get event loop if none set
if self._loop is None:
self._loop = asyncio.get_event_loop() if is_event_loop_set() else asyncio.new_event_loop()
def send_irc_data(self, data: str, log: bool = True) -> None:
"""
send irc data.
:param data: data to send
:param log: log to console
:return: None
"""
if not self._is_connected:
return logger.error("Bot is not connected")
self._protocol.send_irc_data(data, log)
def run(self) -> None:
"""
starts the clients mainloop
:return: None
"""
# create task of our run method
self._loop.create_task(self._connect_and_run(), name="InitialConnectAndRun")
# run the loop forever
try:
self._loop.run_forever()
except KeyboardInterrupt as error:
self._stop_tasks_and_loop(error)
logger.info("Stopped")
def _stop_tasks_and_loop(self, error: KeyboardInterrupt) -> None:
"""
stop tasks and loop
:param error: error
:return: None
"""
logger.debug(error)
self._transport.close()
pending_tasks = asyncio.all_tasks(self._loop)
async def wrapper():
for task in pending_tasks:
logger.debug(f"Cancelling {task}")
task.cancel()
await asyncio.gather(*pending_tasks, return_exceptions=True)
logger.debug(f"Cancelled {pending_tasks}")
logger.debug(f"Current Task: {asyncio.current_task(self._loop)}")
self._loop.run_until_complete(wrapper())
self._loop.stop()
async def _connect_and_run(self) -> None:
"""
connect and run irc loop
:return: None
"""
self._transport, self._protocol = await self._loop.create_connection(
lambda: AsyncIRCClientProtocol(
on_connection_made=self._on_protocol_connection_made,
on_connection_lost=self._on_protocol_connection_lost,
on_data_received=self._on_protocol_data_received
), self._server, self._port
)
await self._on_protocol_done_connecting()
async def _on_protocol_done_connecting(self) -> None:
"""
called as soon as the protocol is done connecting and create_connection has returned
:return: None
"""
self.on_connected_to_server()
def _on_protocol_connection_made(self, transport: transports.Transport) -> None:
"""
on protocol connection made. DO NOT USE. USE _on_protocol_done_connecting instead.
:param transport: transport
:return: None
"""
self._is_connected = True
logger.debug(f"Connected to server with transport {transport}")
def _on_protocol_connection_lost(self, exception: Exception) -> None:
"""
on protocol connection lost
:param exception: exception that caused the disconnect
:return: None
"""
self._is_connected = False
        if isinstance(exception, Exception):
logger.exception(f"Disconnected from server with exception: {exception}")
else:
logger.warning(f"Disconnected from server: {exception}")
self._transport.close()
self.on_disconnected_from_server(exception)
def _on_protocol_data_received(self, message: str) -> None:
"""
on protocol data received
:param message: message
:return: None
"""
class TwitchIRCBotInterfaceMixin(object):
"""
holds interface methods for third-party-users to retain visibility in irc class
"""
async def on_client_ready(self, message: Message) -> None:
"""called when client is ready (001:RPL_WELCOME)"""
async def on_host_info(self, message: Message) -> None:
"""called when server sends host info (002:RPL_YOURHOST)"""
async def on_host_creation_info(self, message: Message) -> None:
"""called when server sends creation info (003:RPL_CREATED)"""
async def on_host_mode_info(self, message: Message) -> None:
"""called on server sends user channel mode info (004:RPL_MYINFO)"""
async def on_user_list_start(self, message: Message) -> None:
"""called on server starts sending user list info (353:RPL_NAMEREPLY)"""
async def on_user_list_end(self, message: Message) -> None:
"""called on server ends sending user list info (366:RPL_ENDOFNAMES )"""
async def on_message_of_the_day(self, message: Message) -> None:
"""called on server sends message of the day (372:RPL_MOTD)"""
async def on_message_of_the_day_start(self, message: Message) -> None:
"""called on server starts sending message of the day (375:RPL_MOTDSTART)"""
async def on_message_of_the_day_end(self, message: Message) -> None:
"""called on server ends sending message of the day (376:RPL_ENDOFMOTD)"""
async def on_no_nickname_given(self, message: Message) -> None:
"""called on no nickname given (431:ERR_NONICKNAMEGIVEN)"""
async def on_nickname_in_use(self, message: Message) -> None:
"""called on nickname in use (433:ERR_NICKNAMEINUSE)"""
async def on_banned_from_channel(self, message: Message) -> None:
"""called on client banned from channel (474:ERR_BANNEDFROMCHANNEL)"""
async def on_client_joined(self, message: Message) -> None:
"""called when client joined (JOIN)"""
async def on_user_join(self, message: Message) -> None:
"""called if any user joins the channel (JOIN)"""
async def on_client_left(self, message: Message) -> None:
"""called when client left (PART)"""
async def on_user_left(self, message: Message) -> None:
"""called on user leaves or gets banned (PART)"""
async def on_irc_capabilities(self, message: Message) -> None:
"""called when receiving command and tag capabilities (CAP) """
async def on_user_state(self, message: Message) -> None:
"""called on user state (USERSTATE)"""
async def on_global_user_state(self, message: Message) -> None:
"""called on global user state update (GLOBALUSERSTATE)"""
async def on_notice(self, message: Message) -> None:
"""called on notice (NOTICE)"""
async def on_hosttarget(self, message: Message) -> None:
"""called on hosttarget (HOSTTARGET)"""
async def on_roomstate(self, message: Message) -> None:
"""called on roomstate (ROOMSTATE)"""
async def on_ping(self, message: Message) -> None:
"""called on ping received (PING)"""
async def on_pong(self, message: Message) -> None:
"""called on pong received (PONG)"""
async def on_reconnect(self, message: Message) -> None:
"""called on reconnect (RECONNECT)"""
async def on_user_notice(self, message: Message) -> None:
"""called on user notice (USERNOTICE)"""
async def on_clear_chat(self, message: Message) -> None:
"""called on clear chat (CLEARCHAT)"""
async def on_message(self, message: Message) -> None:
"""called on message (PRIVMSG)"""
async def on_raid(self, message: Message) -> None:
"""called on raid event (msg-id == raid)"""
class TwitchIRCBot(IRCClient, TwitchIRCBotInterfaceMixin):
TWITCH_IRC_SERVER: str = "irc.chat.twitch.tv"
TWITCH_IRC_PORT: int = 6667
command_callbacks: dict[str, CommandCallback] = {}
case_insensitive: list[str] = []
tasks: list[Callable] = []
_running_tasks: list[Callable] = []
def __init__(self, oauth_token: str, nick_name: str, channel: str, timeout: int = 500, **kwargs):
"""
constructor
:param oauth_token: oauth token
:param nick_name: nickname to use
:param channel: channel name
:param timeout: request ping after n seconds and reconnect after n seconds if no answer was received
:param kwargs: kwargs
"""
super().__init__(
server=kwargs.pop("server", TwitchIRCBot.TWITCH_IRC_SERVER),
port=kwargs.pop("port", TwitchIRCBot.TWITCH_IRC_PORT),
**kwargs
)
self._oauth_token: str = oauth_token
self._nick_name: str = nick_name
self._channel: str = channel
self._has_commands: bool = False
self._has_tags: bool = False
self._timeout: int = timeout
self._disconnect_timer: Timer = Timer(self._timeout, self._on_disconnect_timer_timeout)
self._pong_response_timer: Timer = Timer(20, self._on_pong_response_timer_timeout)
@staticmethod
def command(name: str, mod_only: bool = False, aliases: list[str] = None, case_sensitive: bool = True):
"""
registers function in command table
:param name: name to register as
:param mod_only: is command mod only
:param aliases: alternative names
:param case_sensitive: case-sensitive commands
:return: decorator
"""
if aliases is None:
aliases = []
def decorator(function):
"""
Decorator function to wrap the original function and add it to the command table.
:param function: The function to be wrapped.
:return: The wrapper function.
"""
async def wrapper(*args: Any, **kwargs: Any) -> Any:
"""
Wrapper function that executes the original function.
:param args: Positional arguments passed to the function.
:param kwargs: Keyword arguments passed to the function.
:return: The result of the original function.
"""
return await function(*args, **kwargs)
            command_callback: CommandCallback = CommandCallback(name, wrapper, mod_only, aliases, case_sensitive)
            TwitchIRCBot.command_callbacks[name] = command_callback
            if not case_sensitive:
                # also register the lowered key so the lowered lookup in _check_commands succeeds
                TwitchIRCBot.case_insensitive.append(name.lower())
                TwitchIRCBot.command_callbacks[name.lower()] = command_callback
            for alias in aliases:
                TwitchIRCBot.command_callbacks[alias] = command_callback
                if not case_sensitive:
                    TwitchIRCBot.case_insensitive.append(alias.lower())
                    TwitchIRCBot.command_callbacks[alias.lower()] = command_callback
return wrapper
return decorator
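    # Usage sketch (hypothetical handler registering a '!hello' chat command):
    #   @TwitchIRCBot.command('hello', aliases=['hi'], case_sensitive=False)
    #   async def hello(bot, message):
    #       bot.send_chat_message(f'Hi {message.source.nick}!')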
@staticmethod
def loop(seconds: int = 0, time: str = None, wait_first: bool = False):
"""
repeat function in given intervals
:param seconds: seconds
:param time: time to repeat function at (for example 12:00)
:param wait_first: wait first before executing the function
:return: None
"""
def decorator(function: Callable):
"""
decorator
:param function: functon to decorate
:return: wrapper
"""
async def wrapper(*args, **kwargs):
"""
wrapper function
:param args: args
:param kwargs: kwargs
:return: None
"""
if wait_first:
while True:
if time is None:
await asyncio.sleep(seconds)
else:
await asyncio.sleep(get_time_difference(time) + 1)
await function(*args, **kwargs)
else:
while True:
await function(*args, **kwargs)
if time is None:
await asyncio.sleep(seconds)
else:
await asyncio.sleep(get_time_difference(time) + 1)
TwitchIRCBot.tasks.append(wrapper)
return wrapper
return decorator
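    # Usage sketch: run a coroutine every 60 seconds, or daily at a wall-clock time:
    #   @TwitchIRCBot.loop(seconds=60)
    #   async def heartbeat(bot): ...
    #   @TwitchIRCBot.loop(time='12:00', wait_first=True)
    #   async def noon_announcement(bot): ...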
def _send_pong(self) -> None:
"""
send pong response
:returns: None
"""
self.send_irc_data("PONG :tmi.twitch.tv")
def _send_ping(self) -> None:
"""
        send ping request
:returns: None
"""
self.send_irc_data("PING :tmi.twitch.tv")
def _on_pong(self) -> None:
"""
on pong received
:return: None
"""
self._pong_response_timer.cancel()
def _reconnect(self) -> None:
"""
reconnect to server
:returns: None
"""
self._pong_response_timer.cancel()
self._disconnect_timer.cancel()
def wrapper():
self._transport.close()
pending_tasks = asyncio.all_tasks(self._loop)
for task in pending_tasks:
logger.warning(f"Stopping task {task}")
task.cancel()
logger.info(asyncio.current_task(self._loop))
asyncio.gather(*pending_tasks, return_exceptions=True)
self._loop.create_task(self._connect_and_run())
self._loop.call_soon_threadsafe(wrapper)
def _check_commands(self, message: Message) -> None:
"""
check if message contains a command
"""
# is not a command
if not message.parameters.startswith("!"):
return
# split after '!'
command_parts: list[str] = message.parameters[1:].split()
command_name: str = command_parts[0]
if command_name.lower() in self.case_insensitive:
command_name = command_name.lower()
command_callback: CommandCallback = TwitchIRCBot.command_callbacks.get(command_name)
if command_callback is None:
return logger.warning(f"No bound command found for: {command_parts}")
# is mod only
if command_callback.mod_only and not self.is_mod_or_broadcaster(message):
return logger.debug(f"User {message.source.nick} tried to issue mod-only command: {message.parameters}")
self._loop.create_task(command_callback.callback(self, message))
def _check_user_notice(self, message: Message) -> None:
"""
check user notice for certain events
:param message: message
:return: None
"""
if self._has_tags and message.tags.get("msg-id") == "raid":
self._loop.create_task(self.on_raid(message))
async def _on_disconnect_timer_timeout(self) -> None:
"""
on disconnect timer times out
:return: None
"""
self._send_ping()
self._pong_response_timer.start()
async def _on_pong_response_timer_timeout(self) -> None:
"""
on pong response timer times out
:return: None
"""
self._reconnect()
async def _on_protocol_done_connecting(self) -> None:
"""
Do not use on_connected_to_server: Should be reserved for users
:return: None
"""
await super()._on_protocol_done_connecting()
# start timer
self._disconnect_timer.start()
for task in self.tasks:
self._loop.create_task(task(self))
self._login()
self._requests_tags()
self._request_commands()
self.join(self._channel)
def _login(self) -> None:
"""
login to twitch using oauth token and nickname
:return: None
"""
logger.debug("Attempting login")
self.send_irc_data(f"PASS oauth:{self._oauth_token}")
self.send_irc_data(f"NICK {self._nick_name}")
def _requests_tags(self) -> None:
"""
request tags
:return: None
"""
logger.debug("Request Tags capability.")
self.send_irc_data("CAP REQ :twitch.tv/tags")
def _request_commands(self) -> None:
"""
request commands
:return: None
"""
logger.debug("Request commands capability.")
self.send_irc_data("CAP REQ :twitch.tv/commands")
def _on_protocol_data_received(self, message: str) -> None:
"""
on protocol data received
:param message: message as string
:return: None
"""
if not message:
return logger.debug("Message is empty.")
# restart timer
self._disconnect_timer.restart()
logger.debug(f"Parsing: {message}")
parsed_message: Message = parse_message(message)
if parsed_message is None:
return logger.debug("Nothing parsed.")
logger.debug(f"Parsed: {parsed_message}")
callback: Union[Callable[[Message], Coroutine[Any, Any, None]], None]
match parsed_message.command.command:
case "CAP":
if parsed_message.parameters == "twitch.tv/tags":
self._has_tags = True
elif parsed_message.parameters == "twitch.tv/commands":
self._has_commands = True
callback = self.on_irc_capabilities
case "JOIN":
callback = self.on_client_joined if parsed_message.source.nick == self._nick_name else self.on_user_join
case "PART":
callback = self.on_client_left if parsed_message.source.nick == self._nick_name else self.on_user_left
case "NOTICE":
callback = self.on_notice
case "CLEARCHAT":
callback = self.on_clear_chat
case "HOSTTARGET":
callback = self.on_hosttarget
case "PRIVMSG":
self._check_commands(parsed_message)
callback = self.on_message
case "ROOMSTATE":
callback = self.on_roomstate
case "USERSTATE":
callback = self.on_user_state
case "GLOBALUSERSTATE":
callback = self.on_global_user_state
case "PING":
self._send_pong()
callback = self.on_ping
case "PONG":
self._on_pong()
callback = self.on_pong
case "RECONNECT":
self._reconnect()
callback = self.on_reconnect
case "USERNOTICE":
self._check_user_notice(parsed_message)
callback = self.on_user_notice
case "001": # successful connection + other auth details
callback = self.on_client_ready
case "002": # server hostname and version
callback = self.on_host_info
case "003": # server creation date
callback = self.on_host_creation_info
case "004": # supported user/channel modes + other stuff
callback = self.on_host_mode_info
case "353": # list users in channel
callback = self.on_user_list_start
case "366": # end of user list (353)
callback = self.on_user_list_end
case "372": # message of the day
callback = self.on_message_of_the_day
case "375": # start message of the day list
callback = self.on_message_of_the_day_start
case "376": # end of message of the day list
callback = self.on_message_of_the_day_end
case "431": # invalid nick
callback = self.on_no_nickname_given
case "433": # nick in use
callback = self.on_nickname_in_use
case "474": # banned
callback = self.on_banned_from_channel
case _:
return logger.warning(f"Invalid command: Callback not found for: {parsed_message.command.command}")
if callback is None:
return logger.error("Callback must not be None.")
self._loop.create_task(callback(parsed_message))
def join(self, channel: str) -> None:
"""
join channel
:param channel: name
:return: None
"""
logger.debug(f"Joining {channel}")
self.send_irc_data(f"JOIN #{channel}")
def leave(self, channel: str) -> None:
"""
leave channel.
:param channel: channel to leave
:return: None
"""
logger.debug(f"Leaving {channel}")
self.send_irc_data(f"PART #{channel}")
def send_chat_message(self, text: str, channel: str = None) -> None:
"""
send chat message
:param text: text to send
:param channel: channel to send message in
:return: None
"""
channel = self._channel if channel is None else channel
self.send_irc_data(f"PRIVMSG #{channel} :{text}")
def is_mod(self, message: Message) -> bool:
"""
checks if issuer is mod
:param message: message
:return: bool
"""
if not self._has_tags:
logger.warning("Client has no tags: Cannot check.")
return False
if message.tags is None or message.tags.get("badges") is None:
return False
return message.tags.get("mod") == "1"
def is_broadcaster(self, message: Message) -> bool:
"""
checks if issuer is broadcaster
:param message: message
:return: bool
"""
if not self._has_tags:
logger.warning("Client has no tags: Cannot check.")
return False
if message.tags is None or message.tags.get("badges") is None:
return False
return message.tags.get("badges", {}).get("broadcaster", "") == '1'
def is_mod_or_broadcaster(self, message: Message) -> bool:
"""
checks if msg is either mod or broadcaster
:param message: message
:return: bool
"""
return self.is_mod(message) or self.is_broadcaster(message) | PypiClean |
/FoilMesh-0.0.8.tar.gz/FoilMesh-0.0.8/foilmesh/meshio/off/_off.py | import numpy as np
from .._common import warn
from .._exceptions import ReadError
from .._files import open_file
from .._helpers import register_format
from .._mesh import CellBlock, Mesh
def read(filename):
with open_file(filename) as f:
points, cells = read_buffer(f)
return Mesh(points, cells)
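# Usage sketch (hypothetical path): mesh = read('surface.off') returns a Mesh with
# mesh.points as an (n, 3) float array and a single 'triangle' cell block.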
def read_buffer(f):
# assert that the first line reads `OFF`
line = f.readline()
if isinstance(line, (bytes, bytearray)):
raise ReadError("Expected text buffer, not bytes.")
if line.strip() != "OFF":
raise ReadError("Expected the first line to be `OFF`.")
# fast forward to the next significant line
while True:
line = f.readline().strip()
if line and line[0] != "#":
break
# This next line contains:
# <number of vertices> <number of faces> <number of edges>
num_verts, num_faces, _ = line.split(" ")
num_verts = int(num_verts)
num_faces = int(num_faces)
verts = np.fromfile(f, dtype=float, count=3 * num_verts, sep=" ").reshape(
num_verts, 3
)
data = np.fromfile(f, dtype=int, count=4 * num_faces, sep=" ").reshape(num_faces, 4)
if not np.all(data[:, 0] == 3):
raise ReadError("Can only read triangular faces")
cells = [CellBlock("triangle", data[:, 1:])]
return verts, cells
def write(filename, mesh):
if mesh.points.shape[1] == 2:
warn(
"OFF requires 3D points, but 2D points given. "
"Appending 0 as third component."
)
points = np.column_stack([mesh.points, np.zeros_like(mesh.points[:, 0])])
else:
points = mesh.points
skip = [c for c in mesh.cells if c.type != "triangle"]
if skip:
string = ", ".join(item.type for item in skip)
warn(f"OFF only supports triangle cells. Skipping {string}.")
tri = mesh.get_cells_type("triangle")
with open(filename, "wb") as fh:
fh.write(b"OFF\n")
fh.write(b"# Created by meshio\n\n")
# counts
c = f"{mesh.points.shape[0]} {tri.shape[0]} {0}\n\n"
fh.write(c.encode())
# vertices
# np.savetxt(fh, mesh.points, "%r") # slower
fmt = " ".join(["{}"] * points.shape[1])
out = "\n".join([fmt.format(*row) for row in points]) + "\n"
fh.write(out.encode())
# triangles
out = np.column_stack([np.full(tri.shape[0], 3, dtype=tri.dtype), tri])
# savetxt is slower
# np.savetxt(fh, out, "%d %d %d %d")
fmt = " ".join(["{}"] * out.shape[1])
out = "\n".join([fmt.format(*row) for row in out]) + "\n"
fh.write(out.encode())
register_format("off", [".off"], read, {"off": write}) | PypiClean |
/DiST_Net-0.1.5.tar.gz/DiST_Net-0.1.5/distnet/data_generator/dy_iterator.py | from dataset_iterator import TrackingIterator
import numpy as np
from scipy.ndimage import center_of_mass, find_objects, maximum_filter
from scipy.ndimage.measurements import mean
from math import copysign
import sys
import itertools
import edt
from random import random
class DyIterator(TrackingIterator):
def __init__(self,
dataset,
channel_keywords:list=['/raw', '/regionLabels', '/prevRegionLabels'], # channel @1 must be label & @2 previous label
next:bool = True,
return_categories:bool = True,
closed_end:bool = True,
erase_cut_cell_length:int = 10,
aug_remove_prob:float = 0.05,
aug_frame_subsampling = 1, # either int: subsampling interval will be drawn uniformly in [1,aug_frame_subsampling] or callable that generates a subsampling interval (int)
**kwargs):
if len(channel_keywords)!=3:
            raise ValueError('channel_keywords should contain 3 elements in this order: grayscale input images, object labels, object previous labels')
self.return_categories=return_categories
self.closed_end=closed_end
self.erase_cut_cell_length=erase_cut_cell_length
self.aug_frame_subsampling=aug_frame_subsampling
super().__init__(dataset=dataset,
channel_keywords=channel_keywords,
input_channels=[0],
output_channels=[1, 2],
channels_prev=[True]*3,
channels_next=[next]*3,
mask_channels=[1, 2],
aug_remove_prob=aug_remove_prob,
aug_all_frames=False,
**kwargs)
def _get_batch_by_channel(self, index_array, perform_augmentation, input_only=False):
if self.aug_remove_prob>0 and random() < self.aug_remove_prob:
self.n_frames = 0 # flag that aug_remove = true
else:
if self.aug_frame_subsampling!=1 and self.aug_frame_subsampling is not None:
if callable(self.aug_frame_subsampling):
self.n_frames = self.aug_frame_subsampling()
else:
self.n_frames=np.random.randint(self.aug_frame_subsampling)+1
return super()._get_batch_by_channel(index_array, perform_augmentation, input_only)
def _get_input_batch(self, batch_by_channel, ref_chan_idx, aug_param_array):
input = super()._get_input_batch(batch_by_channel, ref_chan_idx, aug_param_array)
return_next = self.channels_next[1]
n_frames = (input.shape[-1]-1)//2 if return_next else input.shape[-1]-1
if n_frames>1:
sel = [0, n_frames, -1] if return_next else [0, -1]
            return input[..., sel] # only return the first, central and last frames
else:
return input
def _get_output_batch(self, batch_by_channel, ref_chan_idx, aug_param_array):
# dy is computed and returned instead of labels & prevLabels
labelIms = batch_by_channel[1]
prevlabelIms = batch_by_channel[2]
return_next = self.channels_next[1]
n_frames = (labelIms.shape[-1]-1)//2 if return_next else labelIms.shape[-1]-1
# remove small objects
mask_to_erase_cur = [chan_idx for chan_idx in self.mask_channels if chan_idx!=1 and chan_idx in batch_by_channel]
mask_to_erase_chan_cur = [1 if self.channels_prev[chan_idx] else 0 for chan_idx in mask_to_erase_cur]
mask_to_erase_prev = [chan_idx for chan_idx in mask_to_erase_cur if self.channels_prev[chan_idx]]
mask_to_erase_chan_prev = [0] * len(mask_to_erase_prev)
if return_next:
mask_to_erase_next = [chan_idx for chan_idx in mask_to_erase_cur if self.channels_next[chan_idx]]
mask_to_erase_chan_next = [2 if self.channels_prev[chan_idx] else 1 for chan_idx in mask_to_erase_next]
for i in range(labelIms.shape[0]):
# cur timepoint
self._erase_small_objects_at_border(labelIms[i,...,n_frames], i, mask_to_erase_cur, mask_to_erase_chan_cur, batch_by_channel)
# prev timepoint
self._erase_small_objects_at_border(labelIms[i,...,0], i, mask_to_erase_prev, mask_to_erase_chan_prev, batch_by_channel)
if return_next:
self._erase_small_objects_at_border(labelIms[i,...,-1], i, mask_to_erase_next, mask_to_erase_chan_next, batch_by_channel)
dyIm = np.zeros(labelIms.shape[:-1]+(2 if return_next else 1,), dtype=self.dtype)
if self.return_categories:
categories = np.zeros(labelIms.shape[:-1]+(1,), dtype=self.dtype)
if return_next:
categories_next = np.zeros(labelIms.shape[:-1]+(1,), dtype=self.dtype)
for i in range(labelIms.shape[0]):
prev_start = n_frames - aug_param_array[i][ref_chan_idx].get('oob_inc', n_frames+1) + 1
_compute_dy(labelIms[i,...,:n_frames+1], prevlabelIms[i,...,prev_start:n_frames+1] if prev_start<n_frames+1 else None, dyIm[i,...,0], categories[i,...,0] if self.return_categories else None)
if return_next:
_compute_dy(labelIms[i,...,n_frames:], prevlabelIms[i,...,n_frames:], dyIm[i,...,1], categories_next[i,...,0] if self.return_categories else None)
other_output_channels = [chan_idx for chan_idx in self.output_channels if chan_idx!=1 and chan_idx!=2]
all_channels = [batch_by_channel[chan_idx] for chan_idx in other_output_channels]
all_channels.insert(0, dyIm)
if self.return_categories:
all_channels.insert(1, categories)
if return_next:
all_channels.insert(2, categories_next)
edm_c = 3 if return_next else 2
chan_map = {0:0, 1:n_frames, 2:-1}
edm = np.zeros(shape = labelIms.shape[:-1]+(edm_c,), dtype=np.float32)
y_up = 1 if self.closed_end else 0
for b,c in itertools.product(range(edm.shape[0]), range(edm.shape[-1])):
# padding along x axis + black_border = False to take into account that cells can go out from upper / lower borders
edm[b,...,c] = edt.edt(np.pad(labelIms[b,...,chan_map[c]], pad_width=((y_up, 0),(1, 1)), mode='constant', constant_values=0), black_border=False)[y_up:,1:-1]
all_channels.append(edm)
return all_channels
def _erase_small_objects_at_border(self, labelImage, batch_idx, channel_idxs, channel_idxs_chan, batch_by_channel):
objects_to_erase = _get_small_objects_at_border_to_erase(labelImage, self.erase_cut_cell_length, self.closed_end)
if len(objects_to_erase)>0:
# erase in all mask image then in label image
for label, slice in objects_to_erase.items():
mask = labelImage[slice] == label
for mask_chan_idx, c in zip(channel_idxs, channel_idxs_chan):
batch_by_channel[mask_chan_idx][batch_idx,...,c][slice][mask]=0
labelImage[slice][mask] = 0
def _get_small_objects_at_border_to_erase(labelIm, min_length, closed_end):
has_object_down, has_object_up = has_object_at_y_borders(labelIm)
res=dict()
if closed_end: # only consider lower part
has_object_up = False
if has_object_up or has_object_down:
stop = labelIm.shape[0]
        objects = find_objects(labelIm.astype(int))  # np.int was removed in NumPy 1.24
for l, o in enumerate(objects):
if o is not None:
if (not closed_end and o[0].start==0 and (o[0].stop - o[0].start)<min_length) or (o[0].stop==stop and (o[0].stop - o[0].start)<min_length): # length along Y axis
res[l+1] = o
return res
# dy computation utils
def _get_prev_lab(prevlabelIm, labelIm, label, center):
    if int(labelIm[int(round(center[0])), int(round(center[1]))]) == label:  # in case the center falls inside the object -> simply return the value at the center
prev_lab = int(prevlabelIm[int(round(center[0])), int(round(center[1]))])
else:
prev_lab = int(round(mean(prevlabelIm, labelIm, label)))
return prev_lab
def _get_labels_and_centers(labelIm):
labels = np.unique(labelIm)
if len(labels)==0:
return [],[]
labels = [int(round(l)) for l in labels if l!=0]
centers = center_of_mass(labelIm, labelIm, labels)
return dict(zip(labels, centers))
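# _compute_dy rasterizes, for each labeled object in the last frame, the signed
# y-displacement of its center of mass relative to its linked object in the first
# frame; `categories` (when provided) encodes 1 = single previous object,
# 2 = division, 3 = no previous object.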
def _compute_dy(labelIm, prevlabelIm, dyIm, categories=None):
labels_map_centers = [_get_labels_and_centers(labelIm[...,c]) for c in range(labelIm.shape[-1])]
if len(labels_map_centers[-1])==0:
return np.zeros(labelIm.shape[:-1], dtype=labelIm.dtype)
if prevlabelIm is None: # previous (augmented) image is current image
labels_map_prev = dict(zip(labels_map_centers[-1].keys(), labels_map_centers[-1].keys()))
else:
labels_map_prev = []
for c in range(1, labelIm.shape[-1]):
prev_c = c - labelIm.shape[-1]
if -prev_c<=prevlabelIm.shape[-1]:
labels_map_prev.append( {label:_get_prev_lab(prevlabelIm[...,prev_c], labelIm[...,c], label, center) for label, center in labels_map_centers[c].items()} )
if len(labels_map_prev) == 1:
labels_map_prev = labels_map_prev[0]
elif len(labels_map_prev)==0: # no previous labels
labels_map_prev = dict(zip(labels_map_centers[-1].keys(), labels_map_centers[-1].keys()))
else: # iterate through lineage
labels_map_prev_ = labels_map_prev[-1]
for c in range(len(labels_map_prev)-2, -1, -1):
labels_map_prev__ = labels_map_prev[c]
get_prev = lambda label : labels_map_prev__[label] if label in labels_map_prev__ else 0
labels_map_prev_ = {label:get_prev(prev) for label,prev in labels_map_prev_.items()}
labels_map_prev = labels_map_prev_
curLabelIm = labelIm[...,-1]
labels_prev = labels_map_centers[0].keys()
for label, center in labels_map_centers[-1].items():
label_prev = labels_map_prev[label]
if label_prev in labels_prev:
dy = center[0] - labels_map_centers[0][label_prev][0] # axis 0 is y
if categories is None and abs(dy)<1:
dy = copysign(1, dy) # clamp to minimum magnitude 1, same sign as dy (copysign from the math module)
dyIm[curLabelIm == label] = dy
if categories is not None:
labels_of_prev_counts = dict(zip(*np.unique(list(labels_map_prev.values()), return_counts=True)))
for label, label_prev in labels_map_prev.items():
if label_prev>0 and label_prev not in labels_prev: # no previous
value=3
elif labels_of_prev_counts.get(label_prev, 0)>1: # division
value=2
else: # previous has single next
value=1
categories[curLabelIm == label] = value
def has_object_at_y_borders(mask_img):
return np.any(mask_img[[-1, 0], :], axis=1)  # (has_object_down, has_object_up): row -1 = lower border, row 0 = upper border
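# --- Illustrative sketch (added for clarity; not part of the original file) ---
# ``_compute_dy`` expects the labels stacked along the last axis: channel 0 holds
# the previous frame, the last channel the current frame. It fills ``dyIm`` in
# place with each object's y-displacement. Assuming this module's own imports
# (numpy, scipy.ndimage) are present above this fragment:
if __name__ == "__main__":
    lbl = np.zeros((10, 5, 2), dtype=np.int32)
    lbl[2:4, 1:4, 0] = 1  # object 1 in the previous frame (channel 0)
    lbl[4:6, 1:4, 1] = 1  # same object in the current frame, moved +2 rows
    dy_im = np.zeros(lbl.shape[:-1], dtype=np.float32)
    _compute_dy(lbl, None, dy_im)
    print(dy_im.max())  # 2.0 on the pixels of object 1 in the current frame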
// /Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_mt-mt.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Il-\u0126add",
"It-Tnejn",
"It-Tlieta",
"L-Erbg\u0127a",
"Il-\u0126amis",
"Il-\u0120img\u0127a",
"Is-Sibt"
],
"MONTH": [
"Jannar",
"Frar",
"Marzu",
"April",
"Mejju",
"\u0120unju",
"Lulju",
"Awwissu",
"Settembru",
"Ottubru",
"Novembru",
"Di\u010bembru"
],
"SHORTDAY": [
"\u0126ad",
"Tne",
"Tli",
"Erb",
"\u0126am",
"\u0120im",
"Sib"
],
"SHORTMONTH": [
"Jan",
"Fra",
"Mar",
"Apr",
"Mej",
"\u0120un",
"Lul",
"Aww",
"Set",
"Ott",
"Nov",
"Di\u010b"
],
"fullDate": "EEEE, d 'ta'\u2019 MMMM y",
"longDate": "d 'ta'\u2019 MMMM y",
"medium": "dd MMM y HH:mm:ss",
"mediumDate": "dd MMM y",
"mediumTime": "HH:mm:ss",
"short": "dd/MM/y HH:mm",
"shortDate": "dd/MM/y",
"shortTime": "HH:mm"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "\u20ac",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "mt-mt",
"pluralCat": function(n, opt_precision) { if (n == 1) { return PLURAL_CATEGORY.ONE; } if (n == 0 || n % 100 >= 2 && n % 100 <= 10) { return PLURAL_CATEGORY.FEW; } if (n % 100 >= 11 && n % 100 <= 19) { return PLURAL_CATEGORY.MANY; } return PLURAL_CATEGORY.OTHER;}
});
}]);
// /Flask-Statics-Helper-1.0.0.tar.gz/Flask-Statics-Helper-1.0.0/flask_statics/static/angular/i18n/angular-locale_sw-tz.js
'use strict';
angular.module("ngLocale", [], ["$provide", function($provide) {
var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
function getDecimals(n) {
n = n + '';
var i = n.indexOf('.');
return (i == -1) ? 0 : n.length - i - 1;
}
function getVF(n, opt_precision) {
var v = opt_precision;
if (undefined === v) {
v = Math.min(getDecimals(n), 3);
}
var base = Math.pow(10, v);
var f = ((n * base) | 0) % base;
return {v: v, f: f};
}
$provide.value("$locale", {
"DATETIME_FORMATS": {
"AMPMS": [
"AM",
"PM"
],
"DAY": [
"Jumapili",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi"
],
"MONTH": [
"Januari",
"Februari",
"Machi",
"Aprili",
"Mei",
"Juni",
"Julai",
"Agosti",
"Septemba",
"Oktoba",
"Novemba",
"Desemba"
],
"SHORTDAY": [
"Jumapili",
"Jumatatu",
"Jumanne",
"Jumatano",
"Alhamisi",
"Ijumaa",
"Jumamosi"
],
"SHORTMONTH": [
"Jan",
"Feb",
"Mac",
"Apr",
"Mei",
"Jun",
"Jul",
"Ago",
"Sep",
"Okt",
"Nov",
"Des"
],
"fullDate": "EEEE, d MMMM y",
"longDate": "d MMMM y",
"medium": "d MMM y h:mm:ss a",
"mediumDate": "d MMM y",
"mediumTime": "h:mm:ss a",
"short": "dd/MM/y h:mm a",
"shortDate": "dd/MM/y",
"shortTime": "h:mm a"
},
"NUMBER_FORMATS": {
"CURRENCY_SYM": "TSh",
"DECIMAL_SEP": ".",
"GROUP_SEP": ",",
"PATTERNS": [
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 3,
"minFrac": 0,
"minInt": 1,
"negPre": "-",
"negSuf": "",
"posPre": "",
"posSuf": ""
},
{
"gSize": 3,
"lgSize": 3,
"maxFrac": 2,
"minFrac": 2,
"minInt": 1,
"negPre": "\u00a4-",
"negSuf": "",
"posPre": "\u00a4",
"posSuf": ""
}
]
},
"id": "sw-tz",
"pluralCat": function(n, opt_precision) { var i = n | 0; var vf = getVF(n, opt_precision); if (i == 1 && vf.v == 0) { return PLURAL_CATEGORY.ONE; } return PLURAL_CATEGORY.OTHER;}
});
}]);
# /CPAT-3.0.4.tar.gz/CPAT-3.0.4/.eggs/nose-1.3.7-py3.6.egg/nose/plugins/manager.py
import inspect
import logging
import os
import sys
from itertools import chain as iterchain
from warnings import warn
import nose.config
from nose.failure import Failure
from nose.plugins.base import IPluginInterface
from nose.pyversion import sort_list
# the Python 2 cPickle/cStringIO fallbacks collapse to plain imports on Python 3
import pickle
from io import StringIO
__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
'BuiltinPluginManager', 'RestrictedPluginManager']
log = logging.getLogger(__name__)
class PluginProxy(object):
"""Proxy for plugin calls. Essentially a closure bound to the
given call and plugin list.
The plugin proxy also must be bound to a particular plugin
interface specification, so that it knows what calls are available
and any special handling that is required for each call.
"""
interface = IPluginInterface
def __init__(self, call, plugins):
try:
self.method = getattr(self.interface, call)
except AttributeError:
raise AttributeError("%s is not a valid %s method"
% (call, self.interface.__name__))
self.call = self.makeCall(call)
self.plugins = []
for p in plugins:
self.addPlugin(p, call)
def __call__(self, *arg, **kw):
return self.call(*arg, **kw)
def addPlugin(self, plugin, call):
"""Add plugin to my list of plugins to call, if it has the attribute
I'm bound to.
"""
meth = getattr(plugin, call, None)
if meth is not None:
if call == 'loadTestsFromModule' and \
len(inspect.getfullargspec(meth)[0]) == 2:  # getargspec was removed in Python 3.11
orig_meth = meth
meth = lambda module, path, **kwargs: orig_meth(module)
self.plugins.append((plugin, meth))
def makeCall(self, call):
if call == 'loadTestsFromNames':
# special case -- load tests from names behaves somewhat differently
# from other chainable calls, because plugins return a tuple, only
# part of which can be chained to the next plugin.
return self._loadTestsFromNames
meth = self.method
if getattr(meth, 'generative', False):
# call all plugins and yield a flattened iterator of their results
return lambda *arg, **kw: list(self.generate(*arg, **kw))
elif getattr(meth, 'chainable', False):
return self.chain
else:
# return a value from the first plugin that returns non-None
return self.simple
def chain(self, *arg, **kw):
"""Call plugins in a chain, where the result of each plugin call is
sent to the next plugin as input. The final output result is returned.
"""
result = None
# extract the static arguments (if any) from arg so they can
# be passed to each plugin call in the chain
static = [a for (static, a)
in zip(getattr(self.method, 'static_args', []), arg)
if static]
for p, meth in self.plugins:
result = meth(*arg, **kw)
arg = static[:]
arg.append(result)
return result
def generate(self, *arg, **kw):
"""Call all plugins, yielding each item in each non-None result.
"""
for p, meth in self.plugins:
result = None
try:
result = meth(*arg, **kw)
if result is not None:
for r in result:
yield r
except (KeyboardInterrupt, SystemExit):
raise
except:
exc = sys.exc_info()
yield Failure(*exc)
continue
def simple(self, *arg, **kw):
"""Call all plugins, returning the first non-None result.
"""
for p, meth in self.plugins:
result = meth(*arg, **kw)
if result is not None:
return result
def _loadTestsFromNames(self, names, module=None):
"""Chainable but not quite normal. Plugins return a tuple of
(tests, names) after processing the names. The tests are added
to a suite that is accumulated throughout the full call, while
names are input for the next plugin in the chain.
"""
suite = []
for p, meth in self.plugins:
result = meth(names, module=module)
if result is not None:
suite_part, names = result
if suite_part:
suite.extend(suite_part)
return suite, names
class NoPlugins(object):
"""Null Plugin manager that has no plugins."""
interface = IPluginInterface
def __init__(self):
self._plugins = self.plugins = ()
def __iter__(self):
return iter(())  # __iter__ must return an iterator, not a bare tuple
def _doNothing(self, *args, **kwds):
pass
def _emptyIterator(self, *args, **kwds):
return ()
def __getattr__(self, call):
method = getattr(self.interface, call)
if getattr(method, "generative", False):
return self._emptyIterator
else:
return self._doNothing
def addPlugin(self, plug):
raise NotImplementedError()
def addPlugins(self, plugins):
raise NotImplementedError()
def configure(self, options, config):
pass
def loadPlugins(self):
pass
def sort(self):
pass
class PluginManager(object):
"""Base class for plugin managers. PluginManager is intended to be
used only with a static list of plugins. The loadPlugins() implementation
only reloads plugins from _extraplugins to prevent those from being
overridden by a subclass.
The basic functionality of a plugin manager is to proxy all unknown
attributes through a ``PluginProxy`` to a list of plugins.
Note that the list of plugins *may not* be changed after the first plugin
call.
"""
proxyClass = PluginProxy
def __init__(self, plugins=(), proxyClass=None):
self._plugins = []
self._extraplugins = ()
self._proxies = {}
if plugins:
self.addPlugins(plugins)
if proxyClass is not None:
self.proxyClass = proxyClass
def __getattr__(self, call):
try:
return self._proxies[call]
except KeyError:
proxy = self.proxyClass(call, self._plugins)
self._proxies[call] = proxy
return proxy
def __iter__(self):
return iter(self.plugins)
def addPlugin(self, plug):
# allow, for instance, plugins loaded via entry points to
# supplant builtin plugins.
new_name = getattr(plug, 'name', object())
self._plugins[:] = [p for p in self._plugins
if getattr(p, 'name', None) != new_name]
self._plugins.append(plug)
def addPlugins(self, plugins=(), extraplugins=()):
"""extraplugins are maintained in a separate list and
re-added by loadPlugins() to prevent their being overwritten
by plugins added by a subclass of PluginManager
"""
self._extraplugins = extraplugins
for plug in iterchain(plugins, extraplugins):
self.addPlugin(plug)
def configure(self, options, config):
"""Configure the set of plugins with the given options
and config instance. After configuration, disabled plugins
are removed from the plugins list.
"""
log.debug("Configuring plugins")
self.config = config
cfg = PluginProxy('configure', self._plugins)
cfg(options, config)
enabled = [plug for plug in self._plugins if plug.enabled]
self.plugins = enabled
self.sort()
log.debug("Plugins enabled: %s", enabled)
def loadPlugins(self):
for plug in self._extraplugins:
self.addPlugin(plug)
def sort(self):
return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
def _get_plugins(self):
return self._plugins
def _set_plugins(self, plugins):
self._plugins = []
self.addPlugins(plugins)
plugins = property(_get_plugins, _set_plugins, None,
"""Access the list of plugins managed by
this plugin manager""")
class ZeroNinePlugin:
"""Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
"""
def __init__(self, plugin):
self.plugin = plugin
def options(self, parser, env=os.environ):
self.plugin.add_options(parser, env)
def addError(self, test, err):
if not hasattr(self.plugin, 'addError'):
return
# switch off to addSkip, addDeprecated if those types
from nose.exc import SkipTest, DeprecatedTest
ec, ev, tb = err
if issubclass(ec, SkipTest):
if not hasattr(self.plugin, 'addSkip'):
return
return self.plugin.addSkip(test.test)
elif issubclass(ec, DeprecatedTest):
if not hasattr(self.plugin, 'addDeprecated'):
return
return self.plugin.addDeprecated(test.test)
# add capt
capt = test.capturedOutput
return self.plugin.addError(test.test, err, capt)
def loadTestsFromFile(self, filename):
if hasattr(self.plugin, 'loadTestsFromPath'):
return self.plugin.loadTestsFromPath(filename)
def addFailure(self, test, err):
if not hasattr(self.plugin, 'addFailure'):
return
# add capt and tbinfo
capt = test.capturedOutput
tbinfo = test.tbinfo
return self.plugin.addFailure(test.test, err, capt, tbinfo)
def addSuccess(self, test):
if not hasattr(self.plugin, 'addSuccess'):
return
capt = test.capturedOutput
self.plugin.addSuccess(test.test, capt)
def startTest(self, test):
if not hasattr(self.plugin, 'startTest'):
return
return self.plugin.startTest(test.test)
def stopTest(self, test):
if not hasattr(self.plugin, 'stopTest'):
return
return self.plugin.stopTest(test.test)
def __getattr__(self, val):
return getattr(self.plugin, val)
class EntryPointPluginManager(PluginManager):
"""Plugin manager that loads plugins from the `nose.plugins` and
`nose.plugins.0.10` entry points.
"""
entry_points = (('nose.plugins.0.10', None),
('nose.plugins', ZeroNinePlugin))
def loadPlugins(self):
"""Load plugins by iterating the `nose.plugins` entry point.
"""
from pkg_resources import iter_entry_points
loaded = {}
for entry_point, adapt in self.entry_points:
for ep in iter_entry_points(entry_point):
if ep.name in loaded:
continue
loaded[ep.name] = True
log.debug('%s load plugin %s', self.__class__.__name__, ep)
try:
plugcls = ep.load()
except KeyboardInterrupt:
raise
except Exception as e:
# never want a plugin load to kill the test run
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % (ep, e),
RuntimeWarning)
continue
if adapt:
plug = adapt(plugcls())
else:
plug = plugcls()
self.addPlugin(plug)
super(EntryPointPluginManager, self).loadPlugins()
class BuiltinPluginManager(PluginManager):
"""Plugin manager that loads plugins from the list in
`nose.plugins.builtin`.
"""
def loadPlugins(self):
"""Load plugins in nose.plugins.builtin
"""
from nose.plugins import builtin
for plug in builtin.plugins:
self.addPlugin(plug())
super(BuiltinPluginManager, self).loadPlugins()
try:
import pkg_resources
class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
pass
except ImportError:
class DefaultPluginManager(BuiltinPluginManager):
pass
class RestrictedPluginManager(DefaultPluginManager):
"""Plugin manager that restricts the plugin list to those not
excluded by a list of exclude methods. Any plugin that implements
an excluded method will be removed from the manager's plugin list
after plugins are loaded.
"""
def __init__(self, plugins=(), exclude=(), load=True):
DefaultPluginManager.__init__(self, plugins)
self.load = load
self.exclude = exclude
self.excluded = []
self._excludedOpts = None
def excludedOption(self, name):
if self._excludedOpts is None:
from optparse import OptionParser
self._excludedOpts = OptionParser(add_help_option=False)
for plugin in self.excluded:
plugin.options(self._excludedOpts, env={})
return self._excludedOpts.get_option('--' + name)
def loadPlugins(self):
if self.load:
DefaultPluginManager.loadPlugins(self)
allow = []
for plugin in self.plugins:
ok = True
for method in self.exclude:
if hasattr(plugin, method):
ok = False
self.excluded.append(plugin)
break
if ok:
allow.append(plugin)
self.plugins = allow
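# --- Illustrative sketch (not part of nose) ---------------------------------
# RestrictedPluginManager drops any loaded plugin that implements one of the
# excluded methods; the dropped plugins stay inspectable via ``excluded``:
if __name__ == "__main__":
    rpm = RestrictedPluginManager(exclude=('loadTestsFromModule',), load=True)
    rpm.loadPlugins()
    print([getattr(p, 'name', p) for p in rpm.excluded])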
# /BlazeUtils-0.7.0-py3-none-any.whl/blazeutils/spreadsheets.py
import datetime as dt
from decimal import Decimal
from io import BytesIO
from random import randint
import os.path as osp
try:
import openpyxl
except ImportError:
openpyxl = None
try:
import xlrd
except ImportError:
xlrd = None
try:
import xlwt
except ImportError:
xlwt = None
try:
import xlsxwriter
except ImportError:
xlsxwriter = None
from .decorators import deprecate
def _xlrd_required():
if xlrd is None:
raise ImportError('xlrd library is required to use this function or class')
def _xlwt_required():
if xlwt is None:
raise ImportError('xlwt library is required to use this function or class')
def http_headers(filename, randomize=True):
basename, ext = osp.splitext(filename)
if randomize:
rand_filename = '{}-{}{}'.format(basename, randint(1000000, 9999999), ext)
headers = {'Content-Disposition': 'attachment; filename={}'.format(rand_filename)}
else:
headers = {'Content-Disposition': 'attachment; filename={}'.format(filename)}
if ext == '.xlsx':
headers['Content-Type'] = 'application/vnd.openxmlformats-officedocument' \
'.spreadsheetml.sheet'
elif ext == '.xls':
headers['Content-Type'] = 'application/vnd.ms-excel'
else:
raise ValueError('filename "{}" does not end with .xls or .xlsx'.format(filename))
return headers
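# Example (illustrative): typical use in a web view, building download
# headers for a spreadsheet response.
#
#     >>> http_headers('report.xlsx', randomize=False)
#     {'Content-Disposition': 'attachment; filename=report.xlsx', 'Content-Type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'}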
@deprecate('xlrd is no longer maintained, recommend switching to openpyxl')
def workbook_to_reader(xlwt_wb):
"""
convert xlwt Workbook instance to an xlrd instance for reading
"""
_xlrd_required()
fh = BytesIO()
xlwt_wb.save(fh)
# prep for reading
fh.seek(0)
return xlrd.open_workbook(file_contents=fh.read())
def xlsx_to_strio(xlsx_wb):
"""
convert xlwt Workbook instance to a BytesIO instance
"""
fh = BytesIO()
xlsx_wb.filename = fh
xlsx_wb.close()
# prep for reading
fh.seek(0)
return fh
@deprecate('xlrd is no longer maintained, recommend switching to openpyxl')
def xlsx_to_reader(xlsx_wb):
"""
convert xlsxwriter Workbook instance to an xlrd instance for reading
"""
_xlrd_required()
fh = xlsx_to_strio(xlsx_wb)
return xlrd.open_workbook(file_contents=fh.read())
class Writer(object):
STYLE_FACTORY = {}
FONT_FACTORY = {}
def __init__(self, ws=None):
_xlwt_required()
self.ws = ws
self.rownum = 0
self.colnum = 0
def set_sheet(self, ws):
self.ws = ws
self.rownum = 0
self.colnum = 0
def write(self, row, col, data, style=None):
"""
Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash).
"""
ws = self.ws
if not ws:
raise Exception('you must use set_sheet() before write()')
if style:
if isinstance(style, xlwt.Style.XFStyle):
s = style
else:
s = self.get_style(style)
ws.write(row, col, data, s)
else:
ws.write(row, col, data)
def write_merge(self, r1, r2, c1, c2, data, style=None):
"""
Write data to row, col of worksheet (ws) using the style
information.
Again, I'm wrapping this because you'll have to do it if you
create large amounts of formatted entries in your spreadsheet
(else Excel, but probably not OOo will crash).
"""
ws = self.ws
if not ws:
raise Exception('you must use set_sheet() before write()')
if style:
if isinstance(style, xlwt.Style.XFStyle):
s = style
else:
s = self.get_style(style)
ws.write_merge(r1, r2, c1, c2, data, s)
else:
ws.write_merge(r1, r2, c1, c2, data)
def mwrite(self, col_vals, style=None, nextrow=False):
for val in col_vals:
self.awrite(val, style)
if nextrow:
self.newrow()
def awrite(self, data=None, style=None, nextrow=False):
"""
Auto Write: Similar to write, except that the row and column
numbers are handled automatically and based on the extra
parameters to this method.
"""
self.write(self.rownum, self.colnum, data, style)
self.colnum += 1
if nextrow:
self.newrow()
def newrow(self):
self.rownum += 1
self.colnum = 0
def get_style(self, style):
"""
Style is a dict maping key to values.
Valid keys are: background, format, alignment, border
The values for keys are lists of tuples containing (attribute,
value) pairs to set on model instances...
"""
style_key = tuple(style.items())
s = self.STYLE_FACTORY.get(style_key, None)
if s is None:
s = xlwt.XFStyle()
for key, values in style.items():
if key == "background":
p = xlwt.Pattern()
for attr, value in values:
p.__setattr__(attr, value)
s.pattern = p
elif key == "format":
s.num_format_str = values
elif key == "alignment":
a = xlwt.Alignment()
for attr, value in values:
a.__setattr__(attr, value)
s.alignment = a
elif key == "border":
b = xlwt.Formatting.Borders()
for attr, value in values:
b.__setattr__(attr, value)
s.borders = b
elif key == "font":
f = self.get_font(values)
s.font = f
self.STYLE_FACTORY[style_key] = s
return s
def get_font(self, values):
"""
'height' 10pt = 200, 8pt = 160
"""
font_key = values
f = self.FONT_FACTORY.get(font_key, None)
if f is None:
f = xlwt.Font()
for attr, value in values:
f.__setattr__(attr, value)
self.FONT_FACTORY[font_key] = f
return f
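# Example (illustrative; requires xlwt). The style dict keys map to xlwt
# objects as handled in ``get_style`` above; font values are (attr, value)
# pairs:
#
#     >>> import xlwt
#     >>> wb = xlwt.Workbook()
#     >>> w = Writer(wb.add_sheet('data'))
#     >>> w.mwrite(['name', 'qty'], style={'font': (('bold', True),)}, nextrow=True)
#     >>> w.mwrite(['widgets', 12], nextrow=True)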
class _OpenpyxlWriter:
def __init__(self, ws):
self.set_sheet(ws)
def set_sheet(self, ws):
self.ws = ws
# Openpyxl uses 1-based indexing for cells
self.rownum = 1
self.colnum = 1
def awrite(self, data, style=None, nextrow=False):
cell = self.ws.cell(column=self.colnum, row=self.rownum, value=data)
if style:
cell.style = style
self.colnum += 1
if nextrow:
self.nextrow()
def mwrite(self, *colvals, style=None, nextrow=False):
for val in colvals:
self.awrite(val, style)
if nextrow:
self.nextrow()
def nextrow(self):
self.rownum += 1
self.colnum = 1
class _XlsxwriterWriter:
def __init__(self, ws):
self.set_sheet(ws)
def set_sheet(self, ws):
self.ws = ws
self.rownum = 0
self.colnum = 0
def awrite(self, data, style=None, nextrow=False):
self.ws.write(self.rownum, self.colnum, data, style)
self.colnum += 1
if nextrow:
self.nextrow()
def mwrite(self, *colvals, style=None, nextrow=False):
for val in colvals:
self.awrite(val, style)
if nextrow:
self.nextrow()
def nextrow(self):
self.rownum += 1
self.colnum = 0
class WriterX:
@property
def ws(self):
return self._writer.ws
@property
def rownum(self):
return self._writer.rownum
@rownum.setter
def rownum(self, value):
self._writer.rownum = value
@property
def colnum(self):
return self._writer.colnum
@colnum.setter
def colnum(self, value):
self._writer.colnum = value
def __init__(self, ws):
self._writer = None
if openpyxl:
from openpyxl.worksheet.worksheet import Worksheet
if isinstance(ws, Worksheet):
self._writer = _OpenpyxlWriter(ws)
if xlsxwriter:
from xlsxwriter.worksheet import Worksheet
if isinstance(ws, Worksheet):
self._writer = _XlsxwriterWriter(ws)
if not self._writer:
raise TypeError('worksheet type not supported')
def set_sheet(self, ws):
self._writer.set_sheet(ws)
def awrite(self, data, style=None, nextrow=False):
self._writer.awrite(data, style=style, nextrow=nextrow)
def mwrite(self, *colvals, style=None, nextrow=False):
self._writer.mwrite(*colvals, style=style, nextrow=nextrow)
def nextrow(self):
self._writer.nextrow()
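# Example (illustrative; requires openpyxl). Note that WriterX.mwrite takes
# *colvals, unlike Writer.mwrite which takes a single list:
#
#     >>> import openpyxl
#     >>> wb = openpyxl.Workbook()
#     >>> xw = WriterX(wb.active)
#     >>> xw.mwrite('name', 'qty', nextrow=True)
#     >>> xw.awrite('widgets')
#     >>> xw.awrite(12, nextrow=True)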
class XlwtHelper(Writer):
@deprecate('XlwtHelper has been renamed to Writer')
def __init__(self, ws=None):
Writer.__init__(self, ws)
class _XlrdReader:
@deprecate('xlrd is no longer maintained, recommend switching to openpyxl')
def __init__(self, xlrd_wb, sheetnum=0):
self.book = xlrd_wb
self.sheetnum = sheetnum
self.rownum = 0
self.colnum = 0
self.sheet = self.book.sheet_by_index(self.sheetnum)
def cell_value(self, is_date=False):
try:
return self.sheet.cell_value(self.rownum, self.colnum)
finally:
self.colnum += 1
def cell_date(self):
value = self.cell_value()
date_tuple = xlrd.xldate_as_tuple(value, self.book.datemode)
return dt.datetime(*date_tuple).date()
def cell_datetime(self):
value = self.cell_value()
date_tuple = xlrd.xldate_as_tuple(value, self.book.datemode)
return dt.datetime(*date_tuple)
def cell_decimal(self):
value = self.cell_value()
return Decimal(value)
def next_row(self):
self.rownum += 1
self.colnum = 0
class _OpenpyxlReader:
def __init__(self, openpyxl_wb, sheetnum=0):
self.book = openpyxl_wb
self.sheetnum = sheetnum
# Openpyxl uses 1-based indexing for cells
self.rownum = 1
self.colnum = 1
self.sheet = self.book[self.book.sheetnames[sheetnum]]
def cell_value(self, is_date=False):
try:
return self.sheet.cell(self.rownum, self.colnum).value
finally:
self.colnum += 1
def cell_date(self):
# Openpyxl automatically converts to date or datetime
value = self.cell_value()
if isinstance(value, dt.datetime):
return value.date()
elif isinstance(value, dt.date):
return value
else:
raise TypeError(f'{value}: not a date')
def cell_datetime(self):
# Openpyxl automatically converts to date or datetime
value = self.cell_value()
if isinstance(value, dt.datetime):
return value
elif isinstance(value, dt.date):
return dt.datetime(value.year, value.month, value.day)
else:
raise ValueError(f'{value}: not a datetime')
def cell_decimal(self):
value = self.cell_value()
return Decimal(value)
def next_row(self):
self.rownum += 1
self.colnum = 1
class Reader(object):
def __init__(self, workbook, sheetnum=0):
self._reader = None
if openpyxl:
from openpyxl.workbook import Workbook
if isinstance(workbook, Workbook):
self._reader = _OpenpyxlReader(workbook, sheetnum=sheetnum)
if xlrd:
if xlsxwriter:
from xlsxwriter import Workbook
if isinstance(workbook, Workbook):
# an xlsxwriter workbook must be rendered and re-read before xlrd can use it
self._reader = _XlrdReader(xlsx_to_reader(workbook), sheetnum=sheetnum)
if xlwt:
from xlwt.Workbook import Workbook
if isinstance(workbook, Workbook):
# likewise, an xlwt workbook is saved and re-opened through xlrd
self._reader = _XlrdReader(workbook_to_reader(workbook), sheetnum=sheetnum)
if not self._reader:
raise TypeError('workbook type not supported')
@classmethod
def from_xlwt(cls, xlwt_wb):
wb = workbook_to_reader(xlwt_wb)
return _XlrdReader(wb)
@classmethod
def from_xlsx(cls, xlsx_wb):
wb = xlsx_to_reader(xlsx_wb)
return _XlrdReader(wb)
def cell_value(self, is_date=False):
return self._reader.cell_value(is_date=is_date)
def cell_date(self):
return self._reader.cell_date()
def cell_datetime(self):
return self._reader.cell_datetime()
def cell_decimal(self):
return self._reader.cell_decimal()
def next_row(self):
self._reader.next_row()
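# --- Illustrative sketch (not part of blazeutils; requires xlsxwriter and an
# xlrd version that still reads .xlsx, i.e. xlrd < 2.0) ----------------------
if __name__ == "__main__":
    import xlsxwriter
    wb = xlsxwriter.Workbook()
    ws = wb.add_worksheet()
    ws.write(0, 0, 'hello')
    reader = Reader.from_xlsx(wb)   # renders the in-memory workbook, re-reads it
    print(reader.cell_value())      # 'hello'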
# /NREL_reV-0.8.1-py3-none-any.whl/reV/bespoke/gradient_free.py
import numpy as np
import time
from math import log
import logging
logger = logging.getLogger(__name__)
class GeneticAlgorithm():
"""a simple genetic algorithm used to select bespoke turbine locations
"""
def __init__(self, bits, bounds, variable_type, objective_function,
max_generation=100, population_size=0, crossover_rate=0.1,
mutation_rate=0.01, tol=1E-6, convergence_iters=5,
max_time=3600):
"""
Parameters
----------
bits : array of ints
The number of bits assigned to each of the design variables.
The number of discretizations for each design variables will be
2^n where n is the number of bits assigned to that variable.
bounds : array of tuples
The bounds for each design variable. This parameter looks like:
np.array([(lower, upper), (lower, upper)...])
variable_type : array of strings ('int' or 'float')
The type of each design variable (int or float).
objective_function : callable
Function handle for the objective that is to be minimized. Should
take a single variable as an input which is a list/array of the
design variables.
max_generation : int, optional
The maximum number of generations that will be run in the genetic
algorithm.
population_size : int, optional
The population size in the genetic algorithm.
crossover_rate : float, optional
The probability of crossover for a single bit during the crossover
phase of the genetic algorithm.
mutation_rate : float, optional
The probability of a single bit mutating during the mutation phase
of the genetic algorithm.
tol : float, optional
The absolute tolerance to determine convergence.
convergence_iters : int, optional
The number of generations to determine convergence.
max_time : float
The maximum time (in seconds) to run the genetic algorithm.
"""
logger.debug('Initializing GeneticAlgorithm...')
logger.debug('Minimum convergence iterations: {}'
.format(convergence_iters))
logger.debug('Max iterations (generations): {}'.format(max_generation))
logger.debug('Population size: {}'.format(population_size))
logger.debug('Crossover rate: {}'.format(crossover_rate))
logger.debug('Mutation rate: {}'.format(mutation_rate))
logger.debug('Convergence tolerance: {}'.format(tol))
logger.debug('Maximum runtime (in seconds): {}'.format(max_time))
# inputs
self.bits = bits
self.bounds = bounds
self.variable_type = variable_type
self.objective_function = objective_function
self.max_generation = max_generation
self.population_size = population_size
self.crossover_rate = crossover_rate
self.mutation_rate = mutation_rate
self.tol = tol
self.convergence_iters = convergence_iters
self.max_time = max_time
# internal variables, you could output some of this info if you wanted
self.design_variables = np.array([]) # the design variables as they
# are passed into self.objective_function
self.nbits = 0 # the total number of bits in each chromosome
self.nvars = 0 # the total number of design variables
self.parent_population = np.array([]) # 2D array containing all of the
# parent individuals
self.offspring_population = np.array([]) # 2D array containing all of
# the offspring individuals
self.parent_fitness = np.array([]) # array containing all of the
# parent fitnesses
self.offspring_fitness = np.array([]) # array containing all of the
# offspring fitnesses
self.discretized_variables = {} # a dict of arrays containing all of
# the discretized design variable
# outputs
self.solution_history = np.array([])
self.optimized_function_value = 0.0
self.optimized_design_variables = np.array([])
self.initialize_design_variables()
self.initialize_bits()
if self.population_size % 2 == 1:
self.population_size += 1
self.initialize_population()
self.initialize_fitness()
if self.population_size > 5:
n = 5
else:
n = self.population_size
logger.debug('The first few parent individuals are: {}'
.format(self.parent_population[0:n]))
logger.debug('The first few parent fitness values are: {}'
.format(self.parent_fitness[0:n]))
def initialize_design_variables(self):
"""initialize the design variables from the randomly initialized
population
"""
# determine the number of design variables and initialize
self.nvars = len(self.variable_type)
self.design_variables = np.zeros(self.nvars)
float_ind = 0
for i in range(self.nvars):
if self.variable_type[i] == "float":
ndiscretizations = 2**self.bits[i]
self.discretized_variables["float_var%s" % float_ind] = \
np.linspace(self.bounds[i][0], self.bounds[i][1],
ndiscretizations)
float_ind += 1
def initialize_bits(self):
"""determine the total number of bits"""
# determine the total number of bits
for i in range(self.nvars):
if self.variable_type[i] == "int":
int_range = self.bounds[i][1] - self.bounds[i][0]
int_bits = int(np.ceil(log(int_range, 2)))
self.bits[i] = int_bits
self.nbits += self.bits[i]
def initialize_population(self):
"""randomly initialize the parent and offspring populations"""
all_bits_on = np.ones((1, self.nbits), dtype=int)  # keep chromosomes integer so bit values can index arrays
random_bits_on = np.random.randint(
0, high=2, size=(self.population_size - 1, self.nbits)
)
self.parent_population = np.r_[all_bits_on, random_bits_on]
self.offspring_population = np.zeros_like(self.parent_population)
def initialize_fitness(self):
"""initialize the fitness of member of the parent population"""
# initialize the fitness arrays
self.parent_fitness = np.zeros(self.population_size)
self.offspring_fitness = np.zeros(self.population_size)
# initialize fitness of the parent population
for i in range(self.population_size):
self.chromosome_2_variables(self.parent_population[i])
self.parent_fitness[i] = \
self.objective_function(self.design_variables)
def chromosome_2_variables(self, chromosome):
"""convert the binary chromosomes to design variable values"""
first_bit = 0
float_ind = 0
for i in range(self.nvars):
binary_value = 0
for j in range(self.bits[i]):
binary_value += chromosome[first_bit + j] * 2**j
first_bit += self.bits[i]
if self.variable_type[i] == "float":
self.design_variables[i] = \
self.discretized_variables["float_var%s"
% float_ind][binary_value]
float_ind += 1
elif self.variable_type[i] == "int":
self.design_variables[i] = self.bounds[i][0] + binary_value
def crossover(self):
"""perform crossover between individual parents"""
self.offspring_population[:, :] = self.parent_population[:, :]
# mate consecutive pairs of parents (0, 1), (2, 3), ...
# The population is shuffled so this does not need to be randomized
for i in range(int(self.population_size / 2)):
# trade bits in the offspring
crossover_arr = np.random.rand(self.nbits)
for j in range(self.nbits):
if crossover_arr[j] < self.crossover_rate:
self.offspring_population[2 * i][j], \
self.offspring_population[2 * i + 1][j] = \
self.offspring_population[2 * i + 1][j], \
self.offspring_population[2 * i][j]
def mutate(self):
"""randomly mutate bits of each chromosome"""
for i in range(int(self.population_size)):
# mutate bits in the offspring
mutate_arr = np.random.rand(self.nbits)
for j in range(self.nbits):
if mutate_arr[j] < self.mutation_rate:
self.offspring_population[i][j] = \
(self.offspring_population[i][j] + 1) % 2
def optimize_ga(self):
"""run the genetic algorithm"""
converged = False
ngens = 1
generation = 1
difference = self.tol * 10000.0
self.solution_history = np.zeros(self.max_generation + 1)
self.solution_history[0] = np.min(self.parent_fitness)
run_time = 0.0
start_time = time.time()
while converged is False and ngens < self.max_generation and \
run_time < self.max_time:
self.crossover()
self.mutate()
# determine fitness of offspring
for i in range(self.population_size):
self.chromosome_2_variables(self.offspring_population[i])
self.offspring_fitness[i] = \
self.objective_function(self.design_variables)
# rank the total population from best to worst
total_fitness = np.append(self.parent_fitness,
self.offspring_fitness)
ranked_fitness = \
np.argsort(total_fitness)[0:int(self.population_size)]
total_population = \
np.vstack([self.parent_population, self.offspring_population])
self.parent_population[:, :] = total_population[ranked_fitness, :]
self.parent_fitness[:] = total_fitness[ranked_fitness]
# store solution history and wrap up generation
self.solution_history[generation] = np.min(self.parent_fitness)
if generation > self.convergence_iters:
difference = \
self.solution_history[generation - self.convergence_iters]\
- self.solution_history[generation]
else:
difference = 1000
if abs(difference) <= self.tol:
converged = True
# shuffle up the order of the population
shuffle_order = np.arange(1, self.population_size)
np.random.shuffle(shuffle_order)
shuffle_order = np.append([0], shuffle_order)
self.parent_population = self.parent_population[shuffle_order]
self.parent_fitness = self.parent_fitness[shuffle_order]
generation += 1
ngens += 1
run_time = time.time() - start_time
# Assign final outputs
self.solution_history = self.solution_history[0:ngens]
self.optimized_function_value = np.min(self.parent_fitness)
self.chromosome_2_variables(
self.parent_population[np.argmin(self.parent_fitness)])
self.optimized_design_variables = self.design_variables
logger.debug('The GA ran for this many generations: {}'
.format(ngens))
logger.debug('The GA ran for this many seconds: {:.3f}'
.format(run_time))
logger.debug('The optimized function value was: {:.3e}'
.format(self.optimized_function_value))
logger.debug('The optimal design variables were: {}'
.format(self.optimized_design_variables))
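# --- Illustrative sketch (not part of reV) ----------------------------------
# Minimize a 2-D quadratic with one float and one int design variable. The
# int variable's bit count is derived automatically in initialize_bits():
if __name__ == "__main__":
    def sphere(x):
        return float(x[0] ** 2 + x[1] ** 2)

    ga = GeneticAlgorithm(
        bits=np.array([8, 0]),                  # 2**8 discretizations for the float var
        bounds=np.array([(-1.0, 1.0), (0, 7)]),
        variable_type=np.array(['float', 'int']),
        objective_function=sphere,
        population_size=20,
        max_generation=50,
    )
    ga.optimize_ga()
    print(ga.optimized_function_value, ga.optimized_design_variables)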
# /MergePythonSDK.ticketing-2.2.2-py3-none-any.whl/MergePythonSDK/crm/model/task.py
import re  # noqa: F401
import sys # noqa: F401
from typing import (
Optional,
Union,
List,
Dict,
)
from MergePythonSDK.shared.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
OpenApiModel,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from MergePythonSDK.shared.exceptions import ApiAttributeError
from MergePythonSDK.shared.model_utils import import_model_by_name
def lazy_import():
from MergePythonSDK.shared.model.remote_data import RemoteData
from MergePythonSDK.crm.model.task_status_enum import TaskStatusEnum
globals()['RemoteData'] = RemoteData
globals()['TaskStatusEnum'] = TaskStatusEnum
class Task(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
return (bool, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
defined_types = {
'id': (str, none_type,), # noqa: E501
'remote_id': (str, none_type, none_type,), # noqa: E501
'subject': (str, none_type, none_type,), # noqa: E501
'content': (str, none_type, none_type,), # noqa: E501
'owner': (str, none_type, none_type,), # noqa: E501
'account': (str, none_type, none_type,), # noqa: E501
'completed_date': (datetime, none_type, none_type,), # noqa: E501
'due_date': (datetime, none_type, none_type,), # noqa: E501
'status': (TaskStatusEnum, str, none_type,),
'remote_data': ([RemoteData], none_type, none_type,), # noqa: E501
'remote_was_deleted': (bool, none_type,), # noqa: E501
}
expands_types = {"account": "Account", "owner": "User"}
# update types with expands
for key, val in expands_types.items():
if key in defined_types.keys():
expands_model = import_model_by_name(val, "crm")
if len(defined_types[key]) > 0 and isinstance(defined_types[key][0], list):
defined_types[key][0].insert(0, expands_model)
defined_types[key] = (*defined_types[key], expands_model)
return defined_types
@cached_property
def discriminator():
return None
attribute_map = {
'id': 'id', # noqa: E501
'remote_id': 'remote_id', # noqa: E501
'subject': 'subject', # noqa: E501
'content': 'content', # noqa: E501
'owner': 'owner', # noqa: E501
'account': 'account', # noqa: E501
'completed_date': 'completed_date', # noqa: E501
'due_date': 'due_date', # noqa: E501
'status': 'status', # noqa: E501
'remote_data': 'remote_data', # noqa: E501
'remote_was_deleted': 'remote_was_deleted', # noqa: E501
}
read_only_vars = {
'id', # noqa: E501
'remote_data', # noqa: E501
'remote_was_deleted', # noqa: E501
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Task - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
subject (str, none_type): The task's subject.. [optional] # noqa: E501
content (str, none_type): The task's content.. [optional] # noqa: E501
owner (str, none_type): [optional] # noqa: E501
account (str, none_type): [optional] # noqa: E501
completed_date (datetime, none_type): When the task is completed.. [optional] # noqa: E501
due_date (datetime, none_type): When the task is due.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The task's status.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', True)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id = kwargs.get("remote_id", None)
self.subject = kwargs.get("subject", None)
self.content = kwargs.get("content", None)
self.owner = kwargs.get("owner", None)
self.account = kwargs.get("account", None)
self.completed_date = kwargs.get("completed_date", None)
self.due_date = kwargs.get("due_date", None)
self.status = kwargs.get("status", None)
# Read only properties
self._id = kwargs.get("id", str())
self._remote_data = kwargs.get("remote_data", None)
self._remote_was_deleted = kwargs.get("remote_was_deleted", bool())
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Task - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
id (str): [optional] # noqa: E501
remote_id (str, none_type): The third-party API ID of the matching object.. [optional] # noqa: E501
subject (str, none_type): The task's subject.. [optional] # noqa: E501
content (str, none_type): The task's content.. [optional] # noqa: E501
owner (str, none_type): [optional] # noqa: E501
account (str, none_type): [optional] # noqa: E501
completed_date (datetime, none_type): When the task is completed.. [optional] # noqa: E501
due_date (datetime, none_type): When the task is due.. [optional] # noqa: E501
status (bool, dict, float, int, list, str, none_type): The task's status.. [optional] # noqa: E501
remote_data ([RemoteData], none_type): [optional] # noqa: E501
remote_was_deleted (bool): Indicates whether or not this object has been deleted by third party webhooks.. [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
for arg in args:
if isinstance(arg, dict):
kwargs.update(arg)
else:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.remote_id: Union[str, none_type] = kwargs.get("remote_id", None)
self.subject: Union[str, none_type] = kwargs.get("subject", None)
self.content: Union[str, none_type] = kwargs.get("content", None)
self.owner: Union[str, none_type] = kwargs.get("owner", None)
self.account: Union[str, none_type] = kwargs.get("account", None)
self.completed_date: Union[datetime, none_type] = kwargs.get("completed_date", None)
self.due_date: Union[datetime, none_type] = kwargs.get("due_date", None)
self.status: Union[bool, dict, float, int, list, str, none_type] = kwargs.get("status", None)
# Read only properties
self._id: Union[str] = kwargs.get("id", str())
self._remote_data: Union[List["RemoteData"]] = kwargs.get("remote_data", None)
self._remote_was_deleted: Union[bool] = kwargs.get("remote_was_deleted", bool())
# Read only property getters
@property
def id(self):
return self._id
@property
def remote_data(self):
return self._remote_data
@property
def remote_was_deleted(self):
return self._remote_was_deleted
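# --- Illustrative sketch (not part of the generated SDK) --------------------
# Constructing a Task locally; read-only fields are snapshot at init time.
if __name__ == "__main__":
    task = Task(
        subject='Follow up with ACME',
        content='Send the revised proposal',
        status='OPEN',   # assumption: any value accepted by TaskStatusEnum
    )
    print(task.subject, task.status)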
# /MirrorHerokuX-6.0.3-py3-none-any.whl/bot/helper/mirror_utils/download_utils/direct_link_generator.py
import json
import re
import math
import urllib.parse
from os import popen
from random import choice
from urllib.parse import urlparse
import lk21
import requests
import logging
from bot import UPTOBOX_TOKEN
from bs4 import BeautifulSoup
from lk21.extractors.bypasser import Bypass
from base64 import standard_b64encode
from js2py import EvalJs
from bot.helper.ext_utils.exceptions import DirectDownloadLinkException
def direct_link_generator(link: str):
""" direct links generator """
if not link:
raise DirectDownloadLinkException("`No links found!`")
elif 'zippyshare.com' in link:
return zippy_share(link)
elif 'yadi.sk' in link:
return yandex_disk(link)
elif 'cloud.mail.ru' in link:
return cm_ru(link)
elif 'mediafire.com' in link:
return mediafire(link)
elif 'uptobox.com' in link:
return uptobox(link)
elif 'osdn.net' in link:
return osdn(link)
elif 'github.com' in link:
return github(link)
elif 'fembed.com' in link:
return fembed(link)
elif 'femax20.com' in link:
return fembed(link)
elif 'feurl.com' in link:
return fembed(link)
else:
raise DirectDownloadLinkException(f'No Direct link function found for {link}')
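# Example (illustrative; the URL below is a made-up placeholder). The
# dispatcher simply pattern-matches the host and delegates to an extractor:
#
#     >>> try:
#     ...     print(direct_link_generator('https://www.mediafire.com/file/abc123/demo.zip'))
#     ... except DirectDownloadLinkException as exc:
#     ...     print('could not resolve:', exc)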
def zippy_share(url: str) -> str:
link = re.findall("https:/.(.*?).zippyshare", url)[0]
response_content = (requests.get(url)).content
bs_obj = BeautifulSoup(response_content, "lxml")
try:
js_script = bs_obj.find("div", {"class": "center",}).find_all(
"script"
)[1]
except:
js_script = bs_obj.find("div", {"class": "right",}).find_all(
"script"
)[0]
js_content = re.findall(r'\.href.=."/(.*?)";', str(js_script))
js_content = 'var x = "/' + js_content[0] + '"'
evaljs = EvalJs()
setattr(evaljs, "x", None)
evaljs.execute(js_content)
js_content = getattr(evaljs, "x")
return f"https://{link}.zippyshare.com{js_content}"
def yandex_disk(url: str) -> str:
""" Yandex.Disk direct links generator
Based on https://github.com/wldhx/yadisk-direct"""
try:
link = re.findall(r'\bhttps?://.*yadi\.sk\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No Yandex.Disk links found`\n")
api = 'https://cloud-api.yandex.net/v1/disk/public/resources/download?public_key={}'
try:
dl_url = requests.get(api.format(link)).json()['href']
return dl_url
except KeyError:
raise DirectDownloadLinkException("`Error: File not found / Download limit reached`\n")
def cm_ru(url: str) -> str:
""" cloud.mail.ru direct links generator
Using https://github.com/JrMasterModelBuilder/cmrudl.py"""
try:
link = re.findall(r'\bhttps?://.*cloud\.mail\.ru\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No cloud.mail.ru links found`\n")
command = f'vendor/cmrudl.py/cmrudl -s {link}'
result = popen(command).read()
result = result.splitlines()[-1]
try:
data = json.loads(result)
except json.decoder.JSONDecodeError:
raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
dl_url = data['download']
return dl_url
def mediafire(url: str) -> str:
""" MediaFire direct links generator """
try:
link = re.findall(r'\bhttps?://.*mediafire\.com\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No MediaFire links found`\n")
page = BeautifulSoup(requests.get(link).content, 'lxml')
info = page.find('a', {'aria-label': 'Download file'})
dl_url = info.get('href')
return dl_url
def uptobox(url: str) -> str:
try:
link = re.findall(r'\bhttps?://.*uptobox\.com\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No Uptobox links found`\n")
if UPTOBOX_TOKEN is None:
logging.error('UPTOBOX_TOKEN not provided!')
raise DirectDownloadLinkException("`UPTOBOX_TOKEN not provided!`")
else:
check = 'https://uptobox.com/api/user/me?token=%s' % (UPTOBOX_TOKEN)
request = requests.get(check)
info = request.json()
premium = info["data"]["premium"]
try:
link = re.findall(r'\bhttps?://.*uptobox\.com/dl\S+', url)[0]
logging.info('Uptobox direct link')
dl_url = url
except:
if premium == 1:
file_id = re.findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s' % (UPTOBOX_TOKEN, file_id)
req = requests.get(file_link)
result = req.json()
dl_url = result['data']['dlLink']
else:
file_id = re.findall(r'\bhttps?://.*uptobox\.com/(\w+)', url)[0]
file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s' % (UPTOBOX_TOKEN, file_id)
req = requests.get(file_link)
result = req.json()
waiting_time = result["data"]["waiting"] + 1
waiting_token = result["data"]["waitingToken"]
sleep(waiting_time)  # wait out the free-tier delay (the original referenced an undefined _countdown helper)
file_link = 'https://uptobox.com/api/link?token=%s&file_code=%s&waitingToken=%s' % (UPTOBOX_TOKEN, file_id, waiting_token)
req = requests.get(file_link)
result = req.json()
dl_url = result['data']['dlLink']
return dl_url
def osdn(url: str) -> str:
""" OSDN direct links generator """
osdn_link = 'https://osdn.net'
try:
link = re.findall(r'\bhttps?://.*osdn\.net\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No OSDN links found`\n")
page = BeautifulSoup(
requests.get(link, allow_redirects=True).content, 'lxml')
info = page.find('a', {'class': 'mirror_link'})
link = urllib.parse.unquote(osdn_link + info['href'])
mirrors = page.find('form', {'id': 'mirror-select-form'}).findAll('tr')
urls = []
for data in mirrors[1:]:
mirror = data.find('input')['value']
urls.append(re.sub(r'm=(.*)&f', f'm={mirror}&f', link))
return urls[0]
def github(url: str) -> str:
""" GitHub direct links generator """
try:
re.findall(r'\bhttps?://.*github\.com.*releases\S+', url)[0]
except IndexError:
raise DirectDownloadLinkException("`No GitHub Releases links found`\n")
download = requests.get(url, stream=True, allow_redirects=False)
try:
dl_url = download.headers["location"]
return dl_url
except KeyError:
raise DirectDownloadLinkException("`Error: Can't extract the link`\n")
def useragent():
"""
useragent random setter
"""
useragents = BeautifulSoup(
requests.get(
'https://developers.whatismybrowser.com/'
'useragents/explore/operating_system_name/android/').content,
'lxml').findAll('td', {'class': 'useragent'})
user_agent = choice(useragents)
return user_agent.text
def fembed(link: str) -> str:
""" Fembed direct link generator
Based on https://github.com/breakdowns/slam-mirrorbot """
bypasser = lk21.Bypass()
dl_url = bypasser.bypass_fembed(link)  # dict mapping quality label -> url
return list(dl_url.values())[-1]  # take the last entry, as the original loop did
# /MetaNN-0.3.2-py3-none-any.whl/metann/dependentmodule.py
from collections import OrderedDict
from copy import deepcopy
import torch
from torch.nn import Module
from torch._six import string_classes
from .utils import SubDict
from typing import Dict, List, Optional, Tuple, Union, Any
class DependentModule(Module):
r"""
PyTorch suggests that all parameters of a module be independent variables, and forbids a parameter from having a grad_fn.
This module extends nn.Module by registering a subset of buffers as **dependents**,
which indicate the dependent parameters.
This enables the parameters of a DependentModule to be dependent variables, which is useful in meta learning.
This module calls DependentModule.to_dependentmodule when it is created, turning the module and all of its
submodules into subclasses of DependentModule.
You can then use clear_params to transform all parameters into dependents.
Examples:
>>> net = Sequential(Linear(10, 5), Linear(5, 2))
>>> DependentModule(net)
DependentSequential(
(0): DependentLinear(in_features=10, out_features=5, bias=True)
(1): DependentLinear(in_features=5, out_features=2, bias=True)
)
Note:
This class change the origin module when initializing, you might use
>>> DependentModule(deepcopy(net))
if you want the origin model stay unchanged.
"""
def __new__(cls, *args, **kwargs):
if len(args) == 1 and isinstance(args[0], Module):
module = cls.to_dependentmodule(args[0])
else:
module = super(DependentModule, cls).__new__(cls, *args, **kwargs)
return module
def __init__(self, *args, **kwargs) -> None:
self._reinit()
def _reinit(self) -> None:
self._dependents = SubDict(self._buffers)
self._active_dependents = SubDict(self._dependents)
self._dependents_shapes = {}
def __setattr__(self, name: str, value: Any) -> None:
if isinstance(value, Module):
value = self.to_dependentmodule(value)
super(DependentModule, self).__setattr__(name, value)
def register_dependent(self, name: str, tensor: torch.Tensor) -> None:
r"""
register a named tensor to dependents.
Args:
name: name of dependent tensor
tensor (torch.Tensor): dependent tensor
Examples:
>>> dnet = DependentModule(net)
>>> dnet.register_dependent('some_tensor', torch.randn(3, 3))
>>> dnet.some_tensor
tensor([[ 0.4434, 0.9949, -0.4385],
[-0.5292, 0.2555, 0.7772],
[-0.5386, 0.6152, -0.3239]])
"""
if '_dependents' not in self.__dict__:
raise AttributeError(
"cannot assign dependent parameter before MetaModule.__init__() or MetaModule._reinit() call")
elif not isinstance(name, torch._six.string_classes):
raise TypeError("dependent parameter name should be a string. "
"Got {}".format(torch.typename(name)))
elif '.' in name:
raise KeyError("dependent parameter name can't contain \".\"")
elif name == '':
raise KeyError("dependent parameter name can't be empty string \"\"")
elif hasattr(self, name) and name not in self._dependents:
raise KeyError("attribute '{}' already exists".format(name))
elif tensor is not None and not isinstance(tensor, torch.Tensor):
raise TypeError("cannot assign '{}' object to dependent parameter '{}' "
"(torch Tensor or None required)"
.format(torch.typename(tensor), name))
else:
if tensor is not None:
self._active_dependents[name] = tensor
self._dependents_shapes[name] = tensor.shape
else:
self._dependents[name] = tensor
def named_dependents(self, prefix: str='', recurse=True):
r"""
Args:
prefix: the prefix of the names
recurse: traverse only the direct submodules of self if set to False
Returns:
Iterative: iterator of name, dependent pairs of self and sub modules.
"""
memo = set()
modules = self.named_modules(prefix=prefix) if recurse else [(prefix, self)]
for module_prefix, module in modules:
members = module._active_dependents.items()
for k, v in members:
if v in memo and v is not None:
continue
memo.add(v)
name = module_prefix + ('.' if module_prefix else '') + k
yield name, v
def dependents(self, recurse=True):
r"""
Args:
recurse: if set to False, only dependents that are direct members of self are yielded
Returns:
Iterator: an iterator over the dependents of self and its submodules.
"""
for name, param in self.named_dependents(recurse=recurse):
yield param
def update_shapes(self):
r"""
Update the registered shapes of dependents. Call this method when a dependent was initialized with None and
later assigned a tensor. **Do not** call this method when you are using built-in methods only.
"""
def gen():
for name, value in self._active_dependents.items():
if value is None:
if name in self._dependents_shapes:
yield name, self._dependents_shapes[name]
else:
continue
else:
yield name, value.shape
self._dependents_shapes = dict(gen())
def _substitute(self, name, value):
if name not in self._dependents:
raise KeyError("{} is not in dependent parameters".format(name))
elif name in self._dependents_shapes.keys() and self._dependents_shapes[name] != value.shape:
raise ValueError("size mismatch for {}, expect {}, got {}".format(
name, self._dependents_shapes[name], value.shape))
self._dependents[name] = value
def _substitute_from_params_dict(self, params_dict, prefix, strict=True):
for name in self._dependents:
key = prefix + name
if strict == True:
if key in params_dict:
self._substitute(name, params_dict[key])
else:
raise ValueError("params_dict and interim parameters mismatch, got {}".format(key))
elif strict == 'one way':
if key in params_dict:
self._substitute(name, params_dict[key])
else:
if key in params_dict:
try:
self._substitute(name, params_dict[key])
except (KeyError, ValueError):
pass
def substitute(self, named_params, strict=True):
r"""
Substitute self's dependents with the tensors of the same name.
Args:
named_params: iterator of (name, tensor) pairs
strict (bool): forbid a mismatch between named_params and self._dependents if set to True. Default: True
"""
params_dict = dict(named_params)
def load(module: DependentModule, prefix='', _strict=True):
module._substitute_from_params_dict(params_dict, prefix, strict=_strict)
for name, child in module._modules.items():
load(child, prefix + name + '.', _strict=_strict)
load(self, _strict=strict)
def substitute_from_list(self, params):
r"""
Substitute from tensor list.
Args:
params: iterator of tensors
"""
named_params = ((k, v) for (k, _), v in zip(self.named_dependents(), params))
self.substitute(named_params, strict='one way')
def update_actives(self):
keys = set()
for key in self._dependents.keys():
if isinstance(self._dependents[key], torch.Tensor):
keys.add(key)
self._active_dependents = SubDict(self._dependents, keys)
def clear_params(self, init=False, clear_filter=lambda x: True):
r"""
Clear all parameters of self and register them as dependents.
Args:
init (bool): set the values of dependents to None if False; otherwise keep the values of the original parameters.
clear_filter: function that returns False for modules whose parameters should not be cleared
"""
def clear_fn(module: DependentModule):
if clear_filter(module):
for name, value in module._parameters.items():
module._dependents[name] = value.clone().detach().requires_grad_() if value is not None else None
module._parameters = OrderedDict()
module.update_actives()
module.update_shapes()
if not init:
for key in module._dependents:
module._dependents[key] = None
self.apply(clear_fn)
return self
@classmethod
def _sub_class(cls, module: Module):
if not isinstance(module, DependentModule):
return type("Dependent" + type(module).__name__, (DependentModule, type(module)), {})
else:
return type(module)
@classmethod
def _make_subclass(cls, module: Module):
if not isinstance(module, cls):
module.__class__ = type("Dependent" + type(module).__name__, (cls, type(module)), {})
module._reinit()
return module
@classmethod
def to_dependentmodule(cls, module: Module, recurse=True):
r"""
Transform a module and all of its submodules into dependent modules.
Args:
module:
recurse: if set to True, all submodules are transformed into dependent modules recursively.
Returns:
DependentModule: a dependent module
"""
if not recurse:
module = cls._make_subclass(module)
else:
module.apply(lambda x: cls.to_dependentmodule(x, recurse=False))
return module
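# Usage sketch (hypothetical tensors): make a network stateless via the
# `stateless` helper below, then substitute externally supplied fast weights,
# e.g. in a MAML-style inner loop.
#
# >>> dnet = DependentModule.stateless(net)
# >>> fast_weights = [torch.randn(shape) for shape in dnet._dependents_shapes.values()]
# >>> dnet.substitute_from_list(fast_weights)
# >>> out = dnet(torch.randn(4, 10))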
@classmethod
def stateless(cls, module: Module, clear_filter=lambda x: True):
r"""
Transform the input module into a DependentModule whose parameters are cleared.
Args:
module:
clear_filter: function that returns False for modules whose parameters should not be cleared
"""
return cls.to_dependentmodule(deepcopy(module)).clear_params(clear_filter=clear_filter) | PypiClean
/Flask-CKEditor-0.4.6.tar.gz/Flask-CKEditor-0.4.6/flask_ckeditor/static/basic/lang/en-au.js | /*
Copyright (c) 2003-2020, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.md or https://ckeditor.com/license
*/
CKEDITOR.lang['en-au']={"editor":"Rich Text Editor","editorPanel":"Rich Text Editor panel","common":{"editorHelp":"Press ALT 0 for help","browseServer":"Browse Server","url":"URL","protocol":"Protocol","upload":"Upload","uploadSubmit":"Send it to the Server","image":"Image","flash":"Flash","form":"Form","checkbox":"Checkbox","radio":"Radio Button","textField":"Text Field","textarea":"Textarea","hiddenField":"Hidden Field","button":"Button","select":"Selection Field","imageButton":"Image Button","notSet":"<not set>","id":"Id","name":"Name","langDir":"Language Direction","langDirLtr":"Left to Right (LTR)","langDirRtl":"Right to Left (RTL)","langCode":"Language Code","longDescr":"Long Description URL","cssClass":"Stylesheet Classes","advisoryTitle":"Advisory Title","cssStyle":"Style","ok":"OK","cancel":"Cancel","close":"Close","preview":"Preview","resize":"Resize","generalTab":"General","advancedTab":"Advanced","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"You have changed some options. Are you sure you want to close the dialog window?","options":"Options","target":"Target","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","styles":"Style","cssClasses":"Stylesheet Classes","width":"Width","height":"Height","align":"Align","left":"Left","right":"Right","center":"Centre","justify":"Justify","alignLeft":"Align Left","alignRight":"Align Right","alignCenter":"Align Centre","alignTop":"Top","alignMiddle":"Middle","alignBottom":"Bottom","alignNone":"None","invalidValue":"Invalid value.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidLength":"Value specified for the \"%1\" field must be a positive number with or without a valid measurement unit (%2).","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, unavailable</span>","keyboard":{"8":"Backspace","13":"Enter","16":"Shift","17":"Ctrl","18":"Alt","32":"Space","35":"End","36":"Home","46":"Delete","112":"F1","113":"F2","114":"F3","115":"F4","116":"F5","117":"F6","118":"F7","119":"F8","120":"F9","121":"F10","122":"F11","123":"F12","124":"F13","125":"F14","126":"F15","127":"F16","128":"F17","129":"F18","130":"F19","131":"F20","132":"F21","133":"F22","134":"F23","135":"F24","224":"Command"},"keyboardShortcut":"Keyboard shortcut","optionDefault":"Default"},"about":{"copy":"Copyright © $1. 
All rights reserved.","dlgTitle":"About CKEditor 4","moreInfo":"For licensing information please visit our web site:"},"basicstyles":{"bold":"Bold","italic":"Italic","strike":"Strike Through","subscript":"Subscript","superscript":"Superscript","underline":"Underline"},"notification":{"closed":"Notification closed."},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"clipboard":{"copy":"Copy","copyError":"Your browser security settings don't permit the editor to automatically execute copying operations. Please use the keyboard for that (Ctrl/Cmd+C).","cut":"Cut","cutError":"Your browser security settings don't permit the editor to automatically execute cutting operations. Please use the keyboard for that (Ctrl/Cmd+X).","paste":"Paste","pasteNotification":"Press %1 to paste. Your browser doesn‘t support pasting with the toolbar button or context menu option.","pasteArea":"Paste Area","pasteMsg":"Paste your content inside the area below and press OK."},"indent":{"indent":"Increase Indent","outdent":"Decrease Indent"},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"link":{"acccessKey":"Access Key","advanced":"Advanced","advisoryContentType":"Advisory Content Type","advisoryTitle":"Advisory Title","anchor":{"toolbar":"Anchor","menu":"Edit Anchor","title":"Anchor Properties","name":"Anchor Name","errorName":"Please type the anchor name","remove":"Remove Anchor"},"anchorId":"By Element Id","anchorName":"By Anchor Name","charset":"Linked Resource Charset","cssClasses":"Stylesheet Classes","download":"Force Download","displayText":"Display Text","emailAddress":"E-Mail Address","emailBody":"Message Body","emailSubject":"Message Subject","id":"Id","info":"Link Info","langCode":"Language Code","langDir":"Language Direction","langDirLTR":"Left to Right (LTR)","langDirRTL":"Right to Left (RTL)","menu":"Edit Link","name":"Name","noAnchors":"(No anchors available in the document)","noEmail":"Please type the e-mail address","noUrl":"Please type the link URL","noTel":"Please type the phone number","other":"<other>","phoneNumber":"Phone number","popupDependent":"Dependent (Netscape)","popupFeatures":"Popup Window Features","popupFullScreen":"Full Screen (IE)","popupLeft":"Left Position","popupLocationBar":"Location Bar","popupMenuBar":"Menu Bar","popupResizable":"Resizable","popupScrollBars":"Scroll Bars","popupStatusBar":"Status Bar","popupToolbar":"Toolbar","popupTop":"Top Position","rel":"Relationship","selectAnchor":"Select an Anchor","styles":"Style","tabIndex":"Tab Index","target":"Target","targetFrame":"<frame>","targetFrameName":"Target Frame Name","targetPopup":"<popup window>","targetPopupName":"Popup Window Name","title":"Link","toAnchor":"Link to anchor in the text","toEmail":"E-mail","toUrl":"URL","toPhone":"Phone","toolbar":"Link","type":"Link Type","unlink":"Unlink","upload":"Upload"},"list":{"bulletedlist":"Insert/Remove Bulleted List","numberedlist":"Insert/Remove Numbered List"},"undo":{"redo":"Redo","undo":"Undo"}}; | PypiClean |
/FishFishJump-0.2.3.tar.gz/FishFishJump-0.2.3/fish_crawlers/slave_crawler/slave_crawler/spiders/crawler.py | import time
from scrapy.contrib.linkextractors.lxmlhtml import LxmlLinkExtractor
from scrapy.spiders import Rule
from scrapy_redis.spiders import RedisCrawlSpider
from fish_core.scrapy.items import CommonItem
from fish_core.simhash import Simhash
class SimpleCrawler(RedisCrawlSpider):
"""
A simple example of a distributed crawler.
It extracts attributes from each page such as the title, description and keywords.
"""
name = 'simple_fish_crawler'
redis_key = 'simple_fish_crawler:start_urls'
rules = (
Rule(LxmlLinkExtractor(), callback='parse_page', follow=True),
)
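# To feed this spider in a scrapy-redis deployment, push start URLs onto the
# key defined above (a sketch, assuming a Redis instance on localhost):
#
#   import redis
#   redis.Redis().lpush('simple_fish_crawler:start_urls', 'http://example.com/')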
def parse_page(self, response):
self.logger.debug('Parse function called on %s ' % response.url)
item = CommonItem()
item['title'] = ''.join(response.xpath('//title/text()').extract())
item['description'] = ''.join(response.xpath('//meta[contains(@name,"description")]/@content').extract())
item['keywords'] = ''.join(response.xpath('//meta[contains(@name,"keywords")]/@content').extract())
item['p_texts'] = response.xpath('//p/text()').extract()
item['url'] = response.url
item['crawled_timestamp'] = time.time()
item['links'], item['links_text'] = self.parse_links(response.xpath('//a[contains(@href,"http")]'))
item['simhash'] = self.generate_simhash(item)
self.logger.debug('Parse done...........')
return item
def parse_links(self, a_list):
links, links_text = [], []
for a in a_list:
links.append(''.join(a.xpath('@href').extract()))
links_text.append(''.join(a.xpath('text()').extract()))
return links, links_text
def generate_simhash(self, item):
"""
Generate simhash based on title, description, keywords, p_texts and links_text.
"""
texts = item['p_texts'] + item['links_text']
texts.append(item['title'])
texts.append(item['description'])
texts.append(item['keywords'])
return Simhash(','.join(texts).strip()).hash | PypiClean
/FastNLP-1.0.1.tar.gz/FastNLP-1.0.1/fastNLP/transformers/torch/activations.py |
import math
from packaging import version
from fastNLP.envs.imports import _NEED_IMPORT_TORCH
from fastNLP.core.log import logger
if _NEED_IMPORT_TORCH:
import torch
from torch import nn, tanh, sigmoid
from torch.nn.functional import relu
else:
from fastNLP.core.utils.dummy_class import (
DummyClass as relu,
DummyClass as tanh,
DummyClass as sigmoid,
)
def _gelu_python(x):
"""
Original Implementation of the GELU activation function in Google BERT repo when initially created. For
information: OpenAI GPT's GELU is slightly different (and gives slightly different results): 0.5 * x * (1 +
torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) This is now written in C in nn.functional
Also see the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))
def gelu_new(x):
"""
Implementation of the GELU activation function currently in Google BERT repo (identical to OpenAI GPT). Also see
the Gaussian Error Linear Units paper: https://arxiv.org/abs/1606.08415
"""
return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))
if _NEED_IMPORT_TORCH:
if version.parse(torch.__version__) < version.parse("1.4"):
gelu = _gelu_python
else:
gelu = nn.functional.gelu
else:
from fastNLP.core.utils.dummy_class import DummyClass as gelu
def gelu_fast(x):
return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def quick_gelu(x):
return x * torch.sigmoid(1.702 * x)
def _silu_python(x):
"""
See Gaussian Error Linear Units (Hendrycks et al., https://arxiv.org/abs/1606.08415) where the SiLU (Sigmoid Linear
Unit) was originally introduced and coined, and see Sigmoid-Weighted Linear Units for Neural Network Function
Approximation in Reinforcement Learning (Elfwing et al., https://arxiv.org/abs/1702.03118) and Swish: a Self-Gated
Activation Function (Ramachandran et al., https://arxiv.org/abs/1710.05941v1) where the SiLU was experimented with
later.
"""
return x * torch.sigmoid(x)
if _NEED_IMPORT_TORCH:
if version.parse(torch.__version__) < version.parse("1.7"):
silu = _silu_python
else:
silu = nn.functional.silu
else:
from fastNLP.core.utils.dummy_class import DummyClass as silu
def _mish_python(x):
"""
See Mish: A Self-Regularized Non-Monotonic Activation Function (Misra., https://arxiv.org/abs/1908.08681). Also
visit the official repository for the paper: https://github.com/digantamisra98/Mish
"""
return x * torch.tanh(nn.functional.softplus(x))
if _NEED_IMPORT_TORCH:
if version.parse(torch.__version__) < version.parse("1.9"):
mish = _mish_python
else:
mish = nn.functional.mish
else:
from fastNLP.core.utils.dummy_class import DummyClass as mish
def linear_act(x):
return x
ACT2FN = {
"relu": relu,
"silu": silu,
"swish": silu,
"gelu": gelu,
"tanh": tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"quick_gelu": quick_gelu,
"mish": mish,
"linear": linear_act,
"sigmoid": sigmoid,
}
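# Example (sketch): look up an activation by name via the helper below and apply it.
#
#   act = get_activation("gelu_new")
#   y = act(torch.randn(2, 3))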
def get_activation(activation_string):
if activation_string in ACT2FN:
return ACT2FN[activation_string]
else:
raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}") | PypiClean |
/DeeFuzzer-0.8.2.tar.gz/DeeFuzzer-0.8.2/deefuzzer/tools/webm.py |
# <[email protected]>
# This software is a computer program whose purpose is to stream audio
# and video data through icecast2 servers.
# This software is governed by the CeCILL license under French law and
# abiding by the rules of distribution of free software. You can use,
# modify and/ or redistribute the software under the terms of the CeCILL
# license as circulated by CEA, CNRS and INRIA at the following URL
# "http://www.cecill.info".
# As a counterpart to the access to the source code and rights to copy,
# modify and redistribute granted by the license, users are provided only
# with a limited warranty and the software's author, the holder of the
# economic rights, and the successive licensors have only limited
# liability.
# In this respect, the user's attention is drawn to the risks associated
# with loading, using, modifying and/or developing or reproducing the
# software by the user in light of its specific status of free software,
# that may mean that it is complicated to manipulate, and that also
# therefore means that it is reserved for developers and experienced
# professionals having in-depth computer knowledge. Users are therefore
# encouraged to load and test the software's suitability as regards their
# requirements in conditions enabling the security of their systems and/or
# data to be ensured and, more generally, to use and operate it in the
# same conditions as regards security.
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# Author: Guillaume Pellerin <[email protected]>
import os
import string
import datetime
from .utils import *
class WebM(MediaBase):
"""An WebM file object"""
def __init__(self, media):
MediaBase.__init__(self)
self.description = "WebM"
self.mime_type = 'video/webm'
self.extension = 'webm'
self.format = 'WebM'
self.media = media
self.source = self.media
self.file_name, self.file_title, self.file_ext = get_file_info(media) | PypiClean |
/FileCrawler-0.1.8.tar.gz/FileCrawler-0.1.8/filecrawler/libs/process.py | import platform
import tempfile
import time
import signal
import os
from pathlib import Path
from subprocess import Popen, PIPE
from filecrawler.config import Configuration
from filecrawler.libs.color import Color
from filecrawler.libs.logger import Logger
class Process(object):
''' Represents a running/ran process '''
@staticmethod
def devnull():
''' Helper method for opening devnull '''
return open('/dev/null', 'w')
@staticmethod
def call(command, cwd=None, shell=False):
'''
Calls a command (either string or list of args).
Returns tuple:
(retcode, stdout, stderr)
'''
if type(command) is not str or ' ' in command or shell:
shell = True
if Configuration.verbose > 1:
Logger.debug("Executing (Shell): {G}%s" % command)
else:
shell = False
if Configuration.verbose > 1:
Logger.debug("Executing (Shell): {G}%s" % command)
# it cause hang on windows
#pid = Popen(command, cwd=cwd, stdout=PIPE, stderr=PIPE, shell=shell)
#retcode = pid.wait()
#(stdout, stderr) = pid.communicate()
my_env = os.environ.copy()
my_env["PATH"] = Process.get_path()
with tempfile.NamedTemporaryFile(mode="w+") as tmp_out, tempfile.NamedTemporaryFile(mode="w+") as tmp_err:
pid = Popen(command, env=my_env, cwd=cwd, stdout=tmp_out, stderr=tmp_err, shell=shell)
retcode = pid.wait()
# Cursor is after the last write call, reset to read output
tmp_out.seek(0)
tmp_err.seek(0)
stdout = tmp_out.read()
stderr = tmp_err.read()
if type(stdout) is bytes: stdout = stdout.decode('utf-8')
if type(stderr) is bytes: stderr = stderr.decode('utf-8')
if Configuration.verbose > 1 and stdout is not None and stdout.strip() != '':
Color.pe("{P} [stdout] %s{W}" % '\n [stdout] '.join(stdout.strip().split('\n')))
if Configuration.verbose > 1 and stderr is not None and stderr.strip() != '':
Color.pe("{P} [stderr] %s{W}" % '\n [stderr] '.join(stderr.strip().split('\n')))
return (retcode, stdout, stderr)
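# Usage sketch:
#   retcode, out, err = Process.call('ls -la /tmp')  # a string runs through the shell
#   p = Process(['ping', '-c', '1', 'localhost']); p.wait()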
@staticmethod
def get_path():
p = platform.system().lower()
if p == 'darwin':
p = 'macosx'
# Resolve libs/bin relative to this file (the original string concatenation was missing a path separator).
bin_path = Path(os.path.dirname(__file__), '..', 'libs', 'bin')
bin_path2 = Path(os.path.dirname(__file__), '..', 'libs', 'bin', p)
my_env = os.environ.copy()
return f"{bin_path}:{bin_path2}:" + my_env["PATH"]
@staticmethod
def exists(program):
''' Checks if program is installed on this system '''
#p = Process(['which', program])
#stdout = p.stdout().strip()
#stderr = p.stderr().strip()
#if stdout == '' and stderr == '':
# return False
#return True
from shutil import which
return which(program, path=Process.get_path()) is not None
def __init__(self, command, devnull=False, stdout=PIPE, stderr=PIPE, cwd=None, bufsize=0):
''' Starts executing command '''
if type(command) is str:
# Commands have to be a list
command = command.split(' ')
self.command = command
if Configuration.verbose > 1:
Color.pe("\n {C}[?] {W} Executing: {B}%s{W}" % ' '.join(command))
self.out = None
self.err = None
if devnull:
sout = Process.devnull()
serr = Process.devnull()
else:
sout = stdout
serr = stderr
self.start_time = time.time()
self.pid = Popen(command, stdout=sout, stderr=serr, cwd=cwd, bufsize=bufsize)
def __del__(self):
'''
Ran when object is GC'd.
If process is still running at this point, it should die.
'''
if self.pid and self.pid.poll() is None:
self.interrupt()
def stdout(self):
''' Waits for process to finish, returns stdout output '''
self.get_output()
if Configuration.verbose > 1 and self.out is not None and self.out.strip() != '':
Color.pe("{P} [stdout] %s{W}" % '\n [stdout] '.join(self.out.strip().split('\n')))
return self.out
def stderr(self):
''' Waits for process to finish, returns stderr output '''
self.get_output()
if Configuration.verbose > 1 and self.err is not None and self.err.strip() != '':
Color.pe("{P} [stderr] %s{W}" % '\n [stderr] '.join(self.err.strip().split('\n')))
return self.err
def stdoutln(self):
return self.pid.stdout.readline()
def stderrln(self):
return self.pid.stderr.readline()
def get_output(self):
''' Waits for process to finish, sets stdout & stderr '''
if self.pid.poll() is None:
self.pid.wait()
if self.out is None:
(self.out, self.err) = self.pid.communicate()
if type(self.out) is bytes:
self.out = self.out.decode('utf-8')
if type(self.err) is bytes:
self.err = self.err.decode('utf-8')
return (self.out, self.err)
def poll(self):
''' Returns exit code if process is dead, otherwise "None" '''
return self.pid.poll()
def wait(self):
self.pid.wait()
def running_time(self):
''' Returns number of seconds since process was started '''
return int(time.time() - self.start_time)
def interrupt(self, wait_time=2.0):
'''
Send interrupt to current process.
If process fails to exit within `wait_time` seconds, terminates it.
'''
try:
pid = self.pid.pid
cmd = self.command
if type(cmd) is list:
cmd = ' '.join(cmd)
if Configuration.verbose > 1:
Color.pe('\n {C}[?] {W} sending interrupt to PID %d (%s)' % (pid, cmd))
os.kill(pid, signal.SIGINT)
start_time = time.time() # Time since Interrupt was sent
while self.pid.poll() is None:
# Process is still running
time.sleep(0.1)
if time.time() - start_time > wait_time:
# We waited too long for process to die, terminate it.
if Configuration.verbose > 1:
Color.pe('\n {C}[?] {W} Waited > %0.2f seconds for process to die, killing it' % wait_time)
os.kill(pid, signal.SIGTERM)
self.pid.terminate()
break
except OSError as e:
if 'No such process' in e.__str__():
return
raise e # process cannot be killed
@staticmethod
def kill(pid=0):
''' Sends SIGTERM to the process with the given pid (the current process if pid is 0) '''
os.kill(os.getpid() if pid == 0 else pid, signal.SIGTERM)
@staticmethod
def list_process():
import psutil
# Iterate over all running process
for proc in psutil.process_iter():
try:
# Get process name & pid from process object.
cmdline = proc.cmdline()
if isinstance(cmdline, list):
cmdline = ' '.join(cmdline)
yield proc.pid, proc.name(), cmdline
except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess):
pass
@staticmethod
def find_process(name: str):
for p in Process.list_process():
if name in p[1] or name in p[2]:
return p
return None | PypiClean |
/OBITools-1.2.13.tar.gz/OBITools-1.2.13/doc/sphinx/source/scripts/ecodbtaxstat.rst | .. automodule:: ecodbtaxstat
:py:mod:`ecodbtaxstat` specific option
--------------------------------------
.. cmdoption:: --rank=<TAXONOMIC_RANK>
The taxonomic rank at which frequencies have to be computed.
Possible values are:
- class
- family
- forma
- genus
- infraclass
- infraorder
- kingdom
- order
- parvorder
- phylum
- species (default)
- species group
- species subgroup
- subclass
- subfamily
- subgenus
- subkingdom
- suborder
- subphylum
- subspecies
- subtribe
- superclass
- superfamily
- superkingdom
- superorder
- superphylum
- tribe
- varietas
.. include:: ../optionsSet/taxonomyFilter.txt
:py:mod:`ecodbtaxstat` used sequence attributes
-----------------------------------------------
- :doc:`taxid <../attributes/taxid>`
| PypiClean |
/Cartridge-1.3.4-py3-none-any.whl/cartridge/shop/utils.py | import hmac
from hashlib import sha512 as digest
from locale import LC_MONETARY
from locale import Error as LocaleError
from locale import setlocale
from django.core.exceptions import ImproperlyConfigured
from django.utils.translation import gettext as _
from mezzanine.conf import settings
from mezzanine.utils.importing import import_dotted_path
def make_choices(choices):
"""
Zips a list with itself for field choices.
"""
return list(zip(choices, choices))
def clear_session(request, *names):
"""
Removes values for the given session variables names
if they exist.
"""
for name in names:
try:
del request.session[name]
except KeyError:
pass
def recalculate_cart(request):
"""
Updates an existing discount code, shipping, and tax when the
cart is modified.
"""
from cartridge.shop import checkout
from cartridge.shop.forms import DiscountForm
from cartridge.shop.models import Cart
# Rebind the cart to request since it's been modified.
if request.session.get("cart") != request.cart.pk:
request.session["cart"] = request.cart.pk
request.cart = Cart.objects.from_request(request)
discount_code = request.session.get("discount_code", "")
if discount_code:
# Clear out any previously defined discount code
# session vars.
names = ("free_shipping", "discount_code", "discount_total")
clear_session(request, *names)
discount_form = DiscountForm(request, {"discount_code": discount_code})
if discount_form.is_valid():
discount_form.set_discount()
handler = lambda s: import_dotted_path(s) if s else lambda *args: None
billship_handler = handler(settings.SHOP_HANDLER_BILLING_SHIPPING)
tax_handler = handler(settings.SHOP_HANDLER_TAX)
try:
if request.session["order"]["step"] >= checkout.CHECKOUT_STEP_FIRST:
billship_handler(request, None)
tax_handler(request, None)
except (checkout.CheckoutError, ValueError, KeyError):
pass
def set_shipping(request, shipping_type, shipping_total):
"""
Stores the shipping type and total in the session.
"""
request.session["shipping_type"] = str(shipping_type)
request.session["shipping_total"] = str(shipping_total)
def set_tax(request, tax_type, tax_total):
"""
Stores the tax type and total in the session.
"""
request.session["tax_type"] = str(tax_type)
request.session["tax_total"] = str(tax_total)
def sign(value):
"""
Returns the hash of the given value, used for signing order key stored in
cookie for remembering address fields.
"""
key = bytes(settings.SECRET_KEY, encoding="utf8")
value = bytes(value, encoding="utf8")
return hmac.new(key, value, digest).hexdigest()
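# Example (sketch): sign an order key and verify a presented signature later.
#   sig = sign('order-key-123')
#   valid = hmac.compare_digest(sig, presented_sig)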
def set_locale():
"""
Sets the locale for currency formatting.
"""
currency_locale = str(settings.SHOP_CURRENCY_LOCALE)
try:
if setlocale(LC_MONETARY, currency_locale) == "C":
# C locale doesn't contain a suitable value for "frac_digits".
raise LocaleError
except LocaleError:
msg = _(
"Invalid currency locale specified for SHOP_CURRENCY_LOCALE: "
"'%s'. You'll need to set the locale for your system, or "
"configure the SHOP_CURRENCY_LOCALE setting in your settings "
"module."
)
raise ImproperlyConfigured(msg % currency_locale) | PypiClean |
/Flask-Ask-alphavideo-2.1.0.tar.gz/Flask-Ask-alphavideo-2.1.0/docs/_themes/flask_theme_support.py | from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
background_color = "#f8f8f8"
default_style = ""
styles = {
# No corresponding class for the following:
#Text: "", # class: ''
Whitespace: "underline #f8f8f8", # class: 'w'
Error: "#a40000 border:#ef2929", # class: 'err'
Other: "#000000", # class 'x'
Comment: "italic #8f5902", # class: 'c'
Comment.Preproc: "noitalic", # class: 'cp'
Keyword: "bold #004461", # class: 'k'
Keyword.Constant: "bold #004461", # class: 'kc'
Keyword.Declaration: "bold #004461", # class: 'kd'
Keyword.Namespace: "bold #004461", # class: 'kn'
Keyword.Pseudo: "bold #004461", # class: 'kp'
Keyword.Reserved: "bold #004461", # class: 'kr'
Keyword.Type: "bold #004461", # class: 'kt'
Operator: "#582800", # class: 'o'
Operator.Word: "bold #004461", # class: 'ow' - like keywords
Punctuation: "bold #000000", # class: 'p'
# because special names such as Name.Class, Name.Function, etc.
# are not recognized as such later in the parsing, we choose them
# to look the same as ordinary variables.
Name: "#000000", # class: 'n'
Name.Attribute: "#c4a000", # class: 'na' - to be revised
Name.Builtin: "#004461", # class: 'nb'
Name.Builtin.Pseudo: "#3465a4", # class: 'bp'
Name.Class: "#000000", # class: 'nc' - to be revised
Name.Constant: "#000000", # class: 'no' - to be revised
Name.Decorator: "#888", # class: 'nd' - to be revised
Name.Entity: "#ce5c00", # class: 'ni'
Name.Exception: "bold #cc0000", # class: 'ne'
Name.Function: "#000000", # class: 'nf'
Name.Property: "#000000", # class: 'py'
Name.Label: "#f57900", # class: 'nl'
Name.Namespace: "#000000", # class: 'nn' - to be revised
Name.Other: "#000000", # class: 'nx'
Name.Tag: "bold #004461", # class: 'nt' - like a keyword
Name.Variable: "#000000", # class: 'nv' - to be revised
Name.Variable.Class: "#000000", # class: 'vc' - to be revised
Name.Variable.Global: "#000000", # class: 'vg' - to be revised
Name.Variable.Instance: "#000000", # class: 'vi' - to be revised
Number: "#990000", # class: 'm'
Literal: "#000000", # class: 'l'
Literal.Date: "#000000", # class: 'ld'
String: "#4e9a06", # class: 's'
String.Backtick: "#4e9a06", # class: 'sb'
String.Char: "#4e9a06", # class: 'sc'
String.Doc: "italic #8f5902", # class: 'sd' - like a comment
String.Double: "#4e9a06", # class: 's2'
String.Escape: "#4e9a06", # class: 'se'
String.Heredoc: "#4e9a06", # class: 'sh'
String.Interpol: "#4e9a06", # class: 'si'
String.Other: "#4e9a06", # class: 'sx'
String.Regex: "#4e9a06", # class: 'sr'
String.Single: "#4e9a06", # class: 's1'
String.Symbol: "#4e9a06", # class: 'ss'
Generic: "#000000", # class: 'g'
Generic.Deleted: "#a40000", # class: 'gd'
Generic.Emph: "italic #000000", # class: 'ge'
Generic.Error: "#ef2929", # class: 'gr'
Generic.Heading: "bold #000080", # class: 'gh'
Generic.Inserted: "#00A000", # class: 'gi'
Generic.Output: "#888", # class: 'go'
Generic.Prompt: "#745334", # class: 'gp'
Generic.Strong: "bold #000000", # class: 'gs'
Generic.Subheading: "bold #800080", # class: 'gu'
Generic.Traceback: "bold #a40000", # class: 'gt'
} | PypiClean |
/MI_RLMS_MO-0.0.1-py3-none-any.whl/MI_RLMS_MO.py |
def summarize(input_eng):
try:
import requests
from bs4 import BeautifulSoup
from googletrans import Translator
import warnings, logging
warnings.filterwarnings("ignore")
warnings.simplefilter('ignore')
logging.disable(logging.WARNING)
# from transformers import pipeline
# caption = pipeline('image-to-text')
from transformers import pipeline
#sentiment = pipeline("sentiment-analysis")
from transformers import PegasusForConditionalGeneration, PegasusTokenizer, AutoTokenizer
# Load the Pegasus summarization model once (the original re-imported and reloaded it several times).
tokenizer = AutoTokenizer.from_pretrained("google/pegasus-xsum")
# tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
model = PegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
from sentence_transformers import SentenceTransformer
# sbert_model = SentenceTransformer('bert-base-nli-mean-tokens')
# sentiment = pipeline("sentiment-analysis")
warnings.filterwarnings("ignore")
# from gensim.summarization.summarizer import summarize
from gensim.summarization import keywords
from textblob import TextBlob
translator = Translator()
# summarizer = pipeline("summarization")
translation=translator.translate(input_eng, dest = "en")
tokens = tokenizer(translation.text, truncation=True, padding="longest", return_tensors="pt")
# Summarize
summary = model.generate(**tokens)
# Decode summary
text = tokenizer.decode(summary[0]).replace("<pad> ","").replace("</s>","")
return {"Summary":text.strip()}
except Exception as e:
raise e
# input_eng = input()
# summarize(input_eng)
# import gradio as gr
# interface = gr.Interface(fn=summarize,
# inputs=gr.inputs.Textbox(lines=20, placeholder='Past your input text...'),outputs=['text',"text","text"])
# interface.launch(share = True, debug = False)
# https://www.voanews.com/
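# Example (sketch): summarize a short passage in any language supported by googletrans.
#   print(summarize("Le chat est assis sur le tapis.")["Summary"])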
def get_summary_multioutput():
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import requests
import nltk
nltk.download('punkt')
from bs4 import BeautifulSoup
import urllib
import numpy as np
# from PIL import Image
from IPython.display import Image, display
from transformers import pipeline
# caption = pipeline('image-to-text')
import requests
from bs4 import BeautifulSoup
# url = "https://www.andhrajyothy.com/andhra-pradesh"
# req = requests.get(url)
# soup = BeautifulSoup(req.content,"html.parser")
from newspaper import Article
# article = Article(input("Enter article URL :"))
# article.download()
# article.parse()
# article.nlp()
import shutil
import os
try:
for i in os.listdir("."):
# print(i.split(".")[-1])
if i == ".config" or i == "sample_data" or i == "rlms.py" or i == "__pycache__":
continue
try:
os.remove(i)
except OSError:
shutil.rmtree(i)  # directories cannot be removed with os.remove()
except Exception as e:
pass
# if i.split(".")[-1] == "png" or i.split(".")[-1] == "jpg":
# print(i)
url = input("Enter article URL :")
im = []
import requests
from bs4 import BeautifulSoup
import pandas as pd
req = requests.get(url)
# print(req)
soup = BeautifulSoup(req.content,"html.parser")
text = ""
for i in soup.findAll("div",{"class":"wsw"}):
for j in i.findAll("p"):
text+=j.text.strip()
# print(text)
for k in soup.findAll("div",{"class":"img-wrap"}):
for l in k.findAll("div",{"class":"thumb thumb16_9"}):
for m in l.findAll("img"):
im.append(m.get("src"))
for n in k.findAll("div",{"class":"thumb"}):
for o in n.findAll("img"):
im.append(o.get("src"))
imgs = len(im)
show = np.random.randint(imgs)  # pick a random image index (the original only ever chose the first or last)
def _downloadimages():
# url = input("enter imags urls with , speprated :")
for j,i in enumerate(im):
urllib.request.urlretrieve(i, f"{j}.jpg")
_downloadimages()
# input_eng = input("enter ur input text :")
# def show_img():
# display(Image(f'/content/{show}.jpg',width=500, height=250))
# {"Caption":caption(f"{show}.jpg")[0]["generated_text"]}
return summarize(text[:1500].strip()), display(Image(f'{show}.jpg',width=500, height=250)) | PypiClean |
/DiPAS-2.0.tar.gz/DiPAS-2.0/docs/source/examples/transfer_line.ipynb | # Beamline Quadrupole Tuning
The beamline consists of 21 quadrupoles whose strength will be varied in order to fulfill the following optimization targets:
* Beam spot size @ target position: $\sigma_x \leq 500\,\mu m, \sigma_y \leq 500\,\mu m$
* Beam spot size @ beam dump position: $\sigma_x \leq 12\,mm, \sigma_y \leq 12\,mm$
* Fractional beam loss along beamline less than 1%
Let's start with importing everything that we are going to need throughout the optimization process:
```
from collections import deque
import itertools as it
import logging
import math
import os
from pprint import pprint
import statistics
import numpy as np
import pandas as pd
import torch
from dipas.build import from_file, create_script, track_script
from dipas.elements import configure, Quadrupole
from dipas.madx import run_script
```
In the following we define the optimization targets as above:
```
optimization_targets = dict(
target_rms_x = 500e-6, # Beam spot size at target.
target_rms_y = 500e-6,
dump_rms_x = 12e-3, # Beam spot size at beam dump.
dump_rms_y = 12e-3,
loss = 0.01 # Fractional loss along beamline.
)
```
Using the `build.from_file` function we can load the example lattice from a MADX script file:
```
configure(transfer_map_order=1) # Using linear optics to save memory.
lattice = from_file('example.seq')
```
We are only interested in the part of the beamline up to the beam dump position, so we select the corresponding segment:
```
lattice = lattice[:'dump']
print('Last lattice element:', lattice.elements[-1])
```
In a next step we would declare the parameters of the optimization process, i.e. the quadrupoles' gradient strengths. However this step has been done already in the corresponding MADX script. The MADX parser supports special comments of the form `// [flow] variable` to indicate optimization parameters. These comments work for variable definitions as well as attribute updates (e.g. `some_element->k1 = 0.0;`). Let's view the corresponding section of the MADX script file:
```
with open('example.seq') as fh:
print(''.join(fh.readlines()[:30]))
```
We can confirm that the parsed lattice contains the corresponding parameters already:
```
for quad in lattice[Quadrupole]:
print(f'{quad.label}: {quad.k1!r}')
print('Number of parameters:', len(list(lattice.parameters())))
```
If the optimization parameters were not already declared we could also do it manually using the following `for` loop:
```
# for quad in lattice[Quadrupole]:
# quad.k1 = torch.nn.Parameter(quad.k1)
# quad.update_transfer_map() # Need to call this method in order for the change to become effective.
```
In a next step we select the 21 quadrupoles and define some additional properties such as valid boundaries for their k1-values. An important aspect to note here is that k1-values which are marked as optimization parameters must never be zero. This is because internally the polarity of the magnet is derived from the sign of the k1-value (positive sign means horizontally focusing). For that reason we define a small epsilon-boundary instead (any value other than zero would do, no matter how small). Also note that this restriction only applies to k1-values that are `Parameters`. For non-parameters, if the k1-value is zero, the Quadrupole acts as a Drift space.
```
quadrupoles = lattice[Quadrupole]
pprint(quadrupoles)
print()
QPL_limit = 11.1 / 14.62
QPK_limit = 6.88 / 14.62
QPK_magnets = {'gte1qd11', 'gte1qd12', 'ghadqd31', 'ghadqd32', 'ghadqd41', 'ghadqd42'}
polarity = { # +1.0 means horizontally focusing.
'gte1qd11': 1.0,
'gte1qd12': -1.0,
'gte2qt11': 1.0,
'gte2qt12': -1.0,
'gte2qt13': 1.0,
'gth1qd11': 1.0,
'gth1qd12': -1.0,
'gth2qd11': 1.0,
'gth2qd12': -1.0,
'gth2qd21': -1.0,
'gth2qd22': 1.0,
'ghadqd11': -1.0,
'ghadqd12': 1.0,
'ghadqd21': -1.0,
'ghadqd22': 1.0,
'ghadqd31': -1.0,
'ghadqd32': 1.0,
'ghadqd41': 1.0,
'ghadqd42': -1.0,
'ghadqt51': 1.0,
'ghadqt52': -1.0,
}
k1_bounds = {
q.label: sorted([ # Lower bound must come first.
polarity[q.label] * 1e-6, # Variable strength quadrupoles must not be zero to retain their polarity.
polarity[q.label] * (QPK_limit if q.label in QPK_magnets else QPL_limit)
]) for q in quadrupoles
}
pprint(k1_bounds)
```
The initial particle distribution at the entrance of the beamline is stored in a CSV file (5,000 particles):
```
particles = pd.read_csv('particles.csv', index_col=0)
particles = torch.from_numpy(particles.values.T)
print('Particles:', particles.shape)
```
Now let's run a tracking forward pass through the lattice in order to verify everything's set up correctly:
```
x, history, loss = lattice.linear(particles, observe=['target', 'dump'], recloss='sum')
print(x.shape)
print({k: v.shape for k, v in history.items()})
print(loss)
```
Here we can see that out of the initial 5,000 particles only 1,878 make it to the end of the beamline. The remaining 3,122 are lost at the various elements in between and this is reflected in the loss value `loss`. This value is the sum over all elements and for each particle and element it indicates by how much the particle's spatial coordinates exceeded the element's aperture (so it is not directly related to the fraction of particles lost; this value can be computed from the shape of the tensors). If we wanted to know where exactly the particles are lost, we would need to specify `recloss=True` (or more generally `recloss=identifier`, see the documentation of `elements.Segment` for more details). We can also observe that the `loss` value is differentiable, as indicated by the `grad_fn` attribute.
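For reference, a minimal sketch of that variant (assuming, as the docs describe, that `recloss=True` records the loss per element):
```
__, history, loss_per_element = lattice.linear(particles, observe=['target', 'dump'], recloss=True)
```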
Finally we setup the optimizer that computes the updates for the k1-values during the optimization process. For this example we use the `Adam` optimizer:
```
optimizer = torch.optim.Adam(lattice.parameters(), lr=0.001)
```
Now we're ready to start the optimization:
```
cost_history = []
for epoch in it.count(1):
def closure():
optimizer.zero_grad()
__, history, loss = lattice.linear(particles, observe=['target', 'dump'],
recloss='sum', # Sum the loss per element and per particle.
exact_drift=False) # Linear drifts speed up the computation.
particles_lost = 1.0 - history['dump'].shape[1] / particles.shape[1]
if particles_lost > optimization_targets['loss']:
cost = (loss / particles.shape[1]) / optimization_targets['loss'] # Average loss per particle / target loss.
else:
cost = 0. # Target fractional loss was reached, no need to optimize for that (at the current iteration).
log_dict = dict(epoch=epoch, particles_lost=f'{particles_lost:.2f}')
for place in ('target', 'dump'):
# Only compare spot sizes to targeted ones if no more than 50% of the particles were lost.
if history[place].shape[1] > particles.shape[1] // 2:
x, y = history[place][[0, 2]]
rms_x = x.std()
rms_y = y.std()
cost = (cost + torch.nn.functional.relu(rms_x / optimization_targets[f'{place}_rms_x'] - 1.0)
+ torch.nn.functional.relu(rms_y / optimization_targets[f'{place}_rms_y'] - 1.0))
log_dict.update({f'{place}_rms_x': f'{rms_x.data:.6f}', f'{place}_rms_y': f'{rms_y:.6f}'})
else:
log_dict.update({f'{place}_rms_x': 'n.a.', f'{place}_rms_y': 'n.a.'})
cost_history.append(cost.data.clone())
log_dict['cost_to_optimize'] = cost.data.clone()
print(log_dict)
cost.backward(retain_graph=True) # Transfer maps are reused at every iteration so we need to retain the memory buffers.
return cost
optimizer.step(closure)
if cost_history[-1] == 0:
break
with torch.no_grad():
for q in quadrupoles:
q.k1.data.clamp_(*k1_bounds[q.label]) # Squeeze k1-values back into bounds if necessary.
for q in quadrupoles:
q.update_transfer_map()
```
Eventually the optimization process converged and we plot the cost value over all epochs, to review the progress of the optimization process:
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot(cost_history)
plt.yscale('log')
plt.xlabel('epoch')
plt.ylabel('cost')
plt.show()
```
As we can observe from the above plot, the optimization was a bit of a bumpy ride towards the end. Situations like this can often be improved by decreasing the learning rate when approaching the minimum. For that we could have used one of the `torch.optim.lr_scheduler` classes or manually stop the optimization at some point, decrease the learning rate and resume from where we stopped.
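A minimal sketch of the scheduler variant (the names match the loop above; the factor and patience values are illustrative):
```
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5, patience=25)
# inside the training loop, after optimizer.step(closure):
scheduler.step(cost_history[-1])
```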
To conclude the example we will run a crosscheck with the MADX simulation tool, in order to verify the results. We can serialize the current version of the lattice using the functions `create_script, sequence_script, track_script` from the `build` module. With `madx.utils.run_script` we can run the thus generated script and get back the tracking results in form of `pd.DataFrame` objects.
```
madx_script = create_script(
sequence=lattice,
track=track_script(particles, observe=['target', 'dump'], maxaper=[100]*6), # Aperture is already on the elements.
beam=dict(charge=6, mass=11.1779291448, energy=28.5779291448)
)
with open('result.madx', 'w') as fh:
fh.write(madx_script)
results = run_script(madx_script, ['trackone', 'trackloss'], twiss=True, madx=os.path.expanduser('~/bin/madx'))
print('\nCrosscheck with MADX:')
print('\tFraction of particles lost: ', len(results['trackloss'])/particles.shape[1])
print('\tBeam spot size at target: ', results['trackone'].loc['target', ['X', 'Y']].values.std(axis=0))
print('\tBeam spot size at beam dump:', results['trackone'].loc['dump', ['X', 'Y']].values.std(axis=0))
```
There's a small deviation in the results due to the fact that we used linear optics for the tracking while MADX uses non-linear update formulas.
Finally let's plot the quadrupole gradients along the beamline:
```
ax = results['twiss'][0].set_index('NAME').loc[[q.label.upper() for q in quadrupoles], ['K1L']].plot(kind='bar', figsize=(9, 5))
ax.set_ylabel('K1L [1/m]')
```
| PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/methods/stream/change_stream.py | import asyncio
import logging
import shlex
from typing import Union
from ...exceptions import NodeJSNotRunning
from ...exceptions import NoMtProtoClientSet
from ...exceptions import NotInGroupCallError
from ...file_manager import FileManager
from ...mtproto import BridgedClient
from ...scaffold import Scaffold
from ...types import CaptureAudioDevice
from ...types import CaptureAVDesktop
from ...types import CaptureAVDeviceDesktop
from ...types import CaptureVideoDesktop
from ...types import NotInGroupCall
from ...types import StreamDeleted
from ...types.input_stream import AudioPiped
from ...types.input_stream import AudioVideoPiped
from ...types.input_stream import InputStream
from ...types.input_stream import VideoPiped
from ...types.input_stream.audio_image_piped import AudioImagePiped
from ...types.session import Session
py_logger = logging.getLogger('metacalls')
class ChangeStream(Scaffold):
async def change_stream(
self,
chat_id: Union[int, str],
stream: InputStream,
):
"""Change the streaming file
This method allows changing the file being streamed
to a group call.
Parameters:
chat_id (``int`` | ``str``):
Unique identifier of the target chat.
Can be a direct id (int) or a username (str)
stream (:obj:`~metacalls.types.InputStream()`):
Input Streams descriptor, can be used also
:obj:`~metacalls.types.AudioPiped()`,
:obj:`~metacalls.types.AudioImagePiped()`,
:obj:`~metacalls.types.AudioVideoPiped()` or
:obj:`~metacalls.types.VideoPiped()`
Raises:
NoMtProtoClientSet: In case you try
to call this method without any MtProto client
NodeJSNotRunning: In case you try
to call this method without calling
:meth:`~metacalls.MetaCalls.start` first
NoActiveGroupCall: In case you try
to edit a group call that was not started
FileNotFoundError: In case you try
to stream a non-existent file
InvalidStreamMode: In case you try
to set a void stream mode
FFmpegNotInstalled: In case you try
to use a Piped input stream without
having ffmpeg installed
NoAudioSourceFound: In case you try
to play audio from a file that has
no audio track
NoVideoSourceFound: In case you try
to play video from a file that has
no video stream
InvalidVideoProportion: In case you try
to play a video without correct
proportions
NotInGroupCallError: In case you try
to change the stream of a group call
you have not joined
Example:
.. code-block:: python
:emphasize-lines: 10-15
from metacalls import MetaCalls
from metacalls import idle
...
app = MetaCalls(client)
app.start()
... # Call API methods
app.change_stream(
-1001185324811,
AudioPiped(
'test.mp4',
)
)
idle()
"""
try:
chat_id = int(chat_id)
except ValueError:
chat_id = BridgedClient.chat_id(
await self._app.resolve_peer(chat_id),
)
if self._app is not None:
if self._wait_until_run is not None:
headers = None
if isinstance(
stream,
(AudioImagePiped, AudioPiped, AudioVideoPiped, VideoPiped),
):
headers = stream.raw_headers
if stream.stream_video is not None:
if not stream.stream_video.path.startswith('screen://'):
await FileManager.check_file_exist(
stream.stream_video.path.replace(
'fifo://',
'',
).replace(
'image:',
'',
),
headers,
)
if stream.stream_audio is not None:
if not stream.stream_audio.path.startswith('device://'):
await FileManager.check_file_exist(
stream.stream_audio.path.replace(
'fifo://',
'',
).replace(
'image:',
'',
),
headers,
)
audio_f_parameters = ''
video_f_parameters = ''
if isinstance(
stream,
(
AudioImagePiped,
AudioPiped,
AudioVideoPiped,
VideoPiped,
CaptureVideoDesktop,
CaptureAudioDevice,
),
):
await stream.check_pipe()
if stream.stream_audio:
if stream.stream_audio.header_enabled:
audio_f_parameters = stream.headers
audio_f_parameters += ':_cmd_:'.join(
shlex.split(stream.ffmpeg_parameters),
)
if stream.stream_video:
if stream.stream_video.header_enabled:
video_f_parameters = stream.headers
video_f_parameters += ':_cmd_:'.join(
shlex.split(stream.ffmpeg_parameters),
)
elif isinstance(
stream,
CaptureAVDeviceDesktop,
):
audio_f_parameters += ':_cmd_:'.join(
shlex.split(stream.audio_ffmpeg),
)
video_f_parameters += ':_cmd_:'.join(
shlex.split(stream.video_ffmpeg),
)
elif isinstance(
stream,
CaptureAVDesktop,
):
await stream.check_pipe()
if stream.stream_audio:
if stream.stream_audio.header_enabled:
audio_f_parameters = stream.headers
audio_f_parameters += ':_cmd_:'.join(
shlex.split(stream.audio_ffmpeg),
)
video_f_parameters += ':_cmd_:'.join(
shlex.split(stream.video_ffmpeg),
)
solver_id = Session.generate_session_id(24)
async def internal_sender():
if not self._wait_until_run.done():
await self._wait_until_run
stream_audio = stream.stream_audio
stream_video = stream.stream_video
request = {
'action': 'change_stream',
'chat_id': chat_id,
'lip_sync': stream.lip_sync,
'solver_id': solver_id,
}
if stream_audio is not None:
request['stream_audio'] = {
'path': stream_audio.path,
'bitrate': stream_audio.parameters.bitrate,
'ffmpeg_parameters': audio_f_parameters,
}
if stream.stream_video is not None:
video_parameters = stream_video.parameters
if video_parameters.frame_rate % 5 != 0 and \
not isinstance(stream, AudioImagePiped):
py_logger.warning(
'For better experience the '
'video frame rate must be a multiple of 5',
)
request['stream_video'] = {
'path': stream_video.path,
'width': video_parameters.width,
'height': video_parameters.height,
'framerate': video_parameters.frame_rate,
'ffmpeg_parameters': video_f_parameters,
}
await self._binding.send(request)
asyncio.ensure_future(internal_sender())
result = await self._wait_result.wait_future_update(
solver_id,
)
if isinstance(result, NotInGroupCall):
raise NotInGroupCallError()
elif isinstance(result, StreamDeleted):
raise FileNotFoundError()
else:
raise NodeJSNotRunning()
else:
raise NoMtProtoClientSet() | PypiClean |
/LibRecommender-1.3.0-cp38-cp38-macosx_10_9_x86_64.whl/libreco/evaluation/evaluate.py | import functools
import math
import numbers
import numpy as np
import pandas as pd
from sklearn.metrics import log_loss, mean_absolute_error, r2_score, roc_auc_score
from .computation import (
build_eval_transformed_data,
compute_preds,
compute_probs,
compute_recommends,
)
from .metrics import (
LISTWISE_METRICS,
POINTWISE_METRICS,
RANKING_METRICS,
RATING_METRICS,
average_precision_at_k,
balanced_accuracy,
listwise_scores,
ndcg_at_k,
pr_auc_score,
precision_at_k,
rec_coverage,
recall_at_k,
rmse,
roc_gauc_score,
)
from ..data import TransformedEvalSet
def _check_metrics(task, metrics, k):
if not isinstance(metrics, (list, tuple)):
metrics = [metrics]
if task == "rating":
for m in metrics:
if m not in RATING_METRICS:
raise ValueError(f"Metrics `{m}` is not suitable for rating task...")
elif task == "ranking":
for m in metrics:
if m not in RANKING_METRICS:
raise ValueError(f"Metrics `{m}` is not suitable for ranking task...")
if not isinstance(k, numbers.Integral):
raise TypeError("`k` must be integer")
return metrics
def sample_users(data, seed, num):
np_rng = np.random.default_rng(seed)
unique_users = list(data.positive_consumed)
if isinstance(num, numbers.Integral) and 0 < num < len(unique_users):
users = np_rng.choice(unique_users, num, replace=False).tolist()
else:
users = unique_users
return users
def evaluate(
model,
data,
neg_sampling,
eval_batch_size=8192,
metrics=None,
k=10,
sample_user_num=None,
seed=42,
):
"""Evaluate the model on specific data and metrics.
Parameters
----------
model : Base
Model for evaluation.
data : :class:`pandas.DataFrame` or :class:`~libreco.data.TransformedEvalSet`
Data to evaluate.
neg_sampling : bool
Whether to perform negative sampling for evaluating data.
eval_batch_size : int, default: 8192
Batch size used in evaluation.
metrics : list or None, default: None
List of metrics for evaluating.
k : int, default: 10
Parameter of metrics, e.g. recall at k, ndcg at k
sample_user_num : int or None, default: None
Number of users used in evaluating. By default, it will use all the users in eval_data.
Setting it to a positive number will sample users randomly from eval data.
seed : int, default: 42
Random seed.
Returns
-------
eval_results : dict of {str : float}
Evaluation results for the model and data.
Examples
--------
>>> eval_result = evaluate(model, data, neg_sampling=True, metrics=["roc_auc", "precision", "recall"])
"""
if not isinstance(data, (pd.DataFrame, TransformedEvalSet)):
raise ValueError("`data` must be `pandas.DataFrame` or `TransformedEvalSet`")
data = build_eval_transformed_data(model, data, neg_sampling, seed)
if not metrics:
metrics = ["loss"]
metrics = _check_metrics(model.task, metrics, k)
eval_result = dict()
if model.task == "rating":
y_pred, y_true = compute_preds(model, data, eval_batch_size)
for m in metrics:
if m in ["rmse", "loss"]:
eval_result[m] = rmse(y_true, y_pred)
elif m == "mae":
eval_result[m] = mean_absolute_error(y_true, y_pred)
elif m == "r2":
eval_result[m] = r2_score(y_true, y_pred)
else:
if POINTWISE_METRICS.intersection(metrics):
y_prob, y_true = compute_probs(model, data, eval_batch_size)
for m in metrics:
if m in ["log_loss", "loss"]:
eval_result[m] = log_loss(y_true, y_prob, eps=1e-7)
elif m == "balanced_accuracy":
eval_result[m] = balanced_accuracy(y_true, y_prob)
elif m == "roc_auc":
eval_result[m] = roc_auc_score(y_true, y_prob)
elif m == "roc_gauc":
eval_result[m] = roc_gauc_score(y_true, y_prob, data.user_indices)
elif m == "pr_auc":
eval_result[m] = pr_auc_score(y_true, y_prob)
if LISTWISE_METRICS.intersection(metrics):
users = sample_users(data, seed, sample_user_num)
num_batch_users = max(1, math.floor(eval_batch_size / model.n_items))
y_trues = data.positive_consumed
y_recos = compute_recommends(model, users, k, num_batch_users)
for m in metrics:
if m not in LISTWISE_METRICS:
continue
if m == "coverage":
eval_result[m] = rec_coverage(y_recos, users, model.n_items)
continue
elif m == "precision":
fn = precision_at_k
elif m == "recall":
fn = recall_at_k
elif m == "map":
fn = average_precision_at_k
elif m == "ndcg":
fn = ndcg_at_k
# noinspection PyUnboundLocalVariable
eval_result[m] = listwise_scores(fn, y_trues, y_recos, users, k)
return eval_result
def print_metrics(
model,
neg_sampling,
# train_data=None,
eval_data=None,
metrics=None,
eval_batch_size=8192,
k=10,
sample_user_num=2048,
seed=42,
):
loss_name = "rmse" if model.task == "rating" else "log_loss"
metrics_fn = functools.partial(
evaluate,
model=model,
neg_sampling=neg_sampling,
eval_batch_size=eval_batch_size,
k=k,
sample_user_num=sample_user_num,
seed=seed,
)
# if train_data:
# train_metrics = metrics_fn(data=train_data, metrics=[loss_name])
# print(f"\t train {loss_name}: {train_metrics[loss_name]:.4f}")
if eval_data:
eval_metrics = metrics_fn(data=eval_data, metrics=metrics)
for m, val in eval_metrics.items():
if m == "loss":
metric = loss_name
elif m in LISTWISE_METRICS:
metric = f"{m}@{k}"
else:
metric = m
str_val = f"{round(val, 2)}%" if m == "coverage" else f"{val:.4f}"
print(f"\t eval {metric}: {str_val}") | PypiClean |
/FanFicFare-4.27.0.tar.gz/FanFicFare-4.27.0/fanficfare/adapters/adapter_asexstoriescom.py |
# Copyright 2013 Fanficdownloader team, 2018 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import os
from bs4.element import Comment
from .. import exceptions as exceptions
# py2 vs py3 transition
from ..six.moves.urllib import parse as urlparse
from .base_adapter import BaseSiteAdapter, makeDate
def getClass():
return ASexStoriesComAdapter
class ASexStoriesComAdapter(BaseSiteAdapter):
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
self.story.setMetadata('siteabbrev','asscom')
# Extract story ID from base URL, http://www.asexstories.com/Halloween-party-with-the-phantom/
storyId = self.parsedUrl.path.split('/',)[1]
self.story.setMetadata('storyId', storyId)
## set url
self._setURL(url)
@staticmethod
def getSiteDomain():
return 'www.asexstories.com'
@classmethod
def getAcceptDomains(cls):
return ['www.asexstories.com']
@classmethod
def getSiteExampleURLs(cls):
return "http://www.asexstories.com/StoryTitle/"
def getSiteURLPattern(self):
return r"https?://(www\.)?asexstories\.com/([a-zA-Z0-9_-]+)/"
def extractChapterUrlsAndMetadata(self):
"""
Chapters are located at /StoryName/ (for single-chapter
        stories), or /StoryName/index#.html for multiple chapters (# is a
        non-padded incrementing number, like StoryName1.html,
        StoryName2.html, ..., StoryName10.html).
        This site doesn't have much in the way of metadata, except on the
        Category and Tags index pages, so we will get what we can.
Also, as this is an Adult site, the is_adult check is mandatory.
"""
if not (self.is_adult or self.getConfig("is_adult")):
raise exceptions.AdultCheckRequired(self.url)
data1 = self.get_request(self.url)
soup1 = self.make_soup(data1)
#strip comments from soup
[comment.extract() for comment in soup1.find_all(text=lambda text:isinstance(text, Comment))]
if 'Page Not Found.' in data1:
raise exceptions.StoryDoesNotExist(self.url)
url = self.url
# Extract metadata
# Title
title = soup1.find('div',{'class':'story-top-block'}).find('h1')
self.story.setMetadata('title', title.string)
# Author
author = soup1.find('div',{'class':'story-info'}).findAll('div',{'class':'story-info-bl'})[1].find('a')
authorurl = author['href']
self.story.setMetadata('author', author.string)
self.story.setMetadata('authorUrl', authorurl)
authorid = os.path.splitext(os.path.basename(authorurl))[0]
self.story.setMetadata('authorId', authorid)
# Description
### The only way to get the Description (summary) is to
### parse through the Category and/or Tags index pages.
### To get a summary, I've taken the first 150 characters
### from the story.
description = soup1.find('div',{'class':'story-block'}).get_text(strip=True)
description = description.encode('utf-8','ignore').strip()[0:150].decode('utf-8','ignore')
self.setDescription(url,'Excerpt from beginning of story: '+description+'...')
### The first 'chapter' is not listed in the links, so we have to
### add it before the rest of the pages, if any
self.add_chapter('1', self.url)
chapterTable = soup1.find('div',{'class':'pages'}).findAll('a')
if chapterTable is not None:
# Multi-chapter story
for page in chapterTable:
chapterTitle = page.string
chapterUrl = urlparse.urljoin(self.url, page['href'])
if chapterUrl.startswith(self.url): # there are other URLs in the pages block now.
self.add_chapter(chapterTitle, chapterUrl)
rated = soup1.find('div',{'class':'story-info'}).findAll('div',{'class':'story-info-bl5'})[0].find('img')['title'].replace('- Rate','').strip()
self.story.setMetadata('rating',rated)
self.story.setMetadata('dateUpdated', makeDate('01/01/2001', '%m/%d/%Y'))
logger.debug("Story: <%s>", self.story)
return
def getChapterText(self, url):
logger.debug('Getting chapter text from <%s>' % url)
#logger.info('Getting chapter text from <%s>' % url)
data1 = self.get_request(url)
soup1 = self.make_soup(data1)
# get story text
story1 = soup1.find('div', {'class':'story-block'})
        ### This site has links embedded in the text that lead
### to either a video site, or to a tags index page
### the default is to remove them, but you can set the
### strip_text_links to false to keep them in the text
if self.getConfig('strip_text_links'):
for anchor in story1('a', {'target': '_blank'}):
anchor.replaceWith(anchor.string)
## remove ad links in the story text and their following <br>
for anchor in story1('a', {'rel': 'nofollow'}):
br = anchor.find_next_sibling('br')
if br:
br.extract()
anchor.extract()
return self.utf8FromSoup(url, story1) | PypiClean |
/CNN4IE-0.1.9-py3-none-any.whl/cnn4ie/attention_augmented_cnn/model.py | import torch
import torch.nn.functional as F
import torch.nn as nn
from cnn4ie.util import crf
from cnn4ie.attention_augmented_cnn.attn_aug_cnn import AugmentedConv
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class Encoder(nn.Module):
def __init__(self, emb_dim, hid_dim, n_layers, kernel_size, dropout):
'''
define encoder
:param emb_dim:
:param hid_dim:
:param n_layers:
:param kernel_size:
:param dropout:
'''
super(Encoder, self).__init__()
# for kernel in kernel_size:
assert kernel_size % 2 == 1, 'kernel size must be odd!' # kernel is odd, which is convenient for PAD processing on both sides of the sequence
self.scale = torch.sqrt(torch.FloatTensor([0.5])).to(DEVICE) # the variance of the entire network does not change significantly
self.emb2hid = nn.Linear(emb_dim, hid_dim) # fc: emb_dim -> hid_dim
self.hid2emb = nn.Linear(hid_dim, emb_dim) # fc: hid_dim -> emb_dim
# convolution block
# in_channels=hid_dim, out_channels=hid_dim, kernel_size=3, dk=2*hid_dim, dv=int(0.2*hid_dim), Nh=4, stride=1
self.convs = nn.ModuleList([AugmentedConv( in_channels=hid_dim,
out_channels=hid_dim, # the dimension of convolution output,2*hid_dim -> GLU activation function
kernel_size=kernel_size,
dk=2* hid_dim,
dv=hid_dim,
Nh=4,
                                                    padding=(kernel_size - 1) // 2)  # zero-pad both sides of the sequence so the length doesn't change
for _ in range(n_layers)]) # convolution layer
self.dropout = nn.Dropout(dropout)
#self.BN = nn.BatchNorm1d()
def forward(self, encoder_output):
# encoder_output:[batch_size, src_len, emb_dim]
# emb_dim -> hid_dim, as the input of convolution layers
conv_input = self.emb2hid(encoder_output) # [batch_size, src_len, hid_dim]
# change dimension,convolve the last dimension of input
conv_input = conv_input.permute(0, 2, 1) # [batch_size, hid_dim, src_len]
# convolution block
for i, conv in enumerate(self.convs):
conved = conv(self.dropout(conv_input)) # [batch_size, 2*hid_dim, src_len]
#conved = self.BN(conved) # [batch_size, 2*hid_dim, src_len]
# GLU activation function
conved = F.glu(conved, dim=1) # [batch_size, hid_dim, src_len]
# residual connection
conved = (conved + conv_input) * self.scale # [batch_size, hid_dim, src_len]
# input of the next convolution layer
conv_input = conved
# hid_dim -> emb_dim,as the output of convolution block
conved = self.hid2emb(conved.permute(0, 2, 1)) # [batch_size, src_len, emb_dim]
# residual connection,as the joint output feature of encoder
combined = (conved + encoder_output) * self.scale # [batch_size, src_len, emb_dim]
return conved, combined
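# A minimal shape check for the encoder (illustrative values only; this module
# does not ship such a test):
#
#   enc = Encoder(emb_dim=128, hid_dim=256, n_layers=2, kernel_size=3, dropout=0.1)
#   x = torch.randn(4, 50, 128)   # [batch_size, src_len, emb_dim]
#   conved, combined = enc(x)     # both [batch_size, src_len, emb_dim]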
class MultiLayerAugmentedCNN(nn.Module):
def __init__(self, input_dim, output_dim, emb_dim, hid_dim, cnn_layers, encoder_layers, kernel_size, dropout, PAD_IDX, max_length=100, use_crf = True):
'''
        define the multi-layer attention-augmented CNN model
:param input_dim:
:param output_dim:
:param emb_dim:
:param hid_dim:
:param cnn_layers:
:param encoder_layers:
:param kernel_size:
:param dropout:
:param padding_idx:
:param max_length:
'''
super(MultiLayerAugmentedCNN, self).__init__()
self.tok_embedding = nn.Embedding(input_dim, emb_dim, padding_idx=PAD_IDX) # token embedding
self.pos_embedding = nn.Embedding(max_length, emb_dim, padding_idx=PAD_IDX) # position embedding
self.encoder = nn.ModuleList([Encoder(emb_dim, hid_dim, cnn_layers, kernel_size, dropout)
for _ in range(encoder_layers)])
self.dropout = nn.Dropout(dropout)
self.fc_out = nn.Linear(emb_dim, output_dim)
self.crf = crf.CRF(output_dim, batch_first=True)
self.use_crf = use_crf
def forward(self, token_tensor):
'''
:param token_tensor: [batch_size, src_len]
:return:
'''
# token, position embedding
tok_embedded = self.tok_embedding(token_tensor) # [batch_size, src_len, emb_dim]
        # build the position tensor -> [batch_size, src_len]; position indices run from 0 to src_len-1
position = torch.arange(0, token_tensor.shape[1]).unsqueeze(0).repeat(token_tensor.shape[0], 1).to(DEVICE)
pos_embedded = self.pos_embedding(position.long()) # [batch_size, src_len, emb_dim]
# token embedded + pos_embedded
embedded = self.dropout(tok_embedded + pos_embedded) # [batch_size, src_len, emb_dim]
encoder_output = embedded
# encoder block
for i, encoder in enumerate(self.encoder):
# encoding
conved, encoder_output = encoder(self.dropout(encoder_output)) # [batch_size, src_len, emb_dim]
# pooling, predict class of the entire sentence
# encoder_output = F.avg_pool1d(encoder_output.permute(0, 2, 1), encoder_output.shape[1]).squeeze(2) # [batch_size, emb_dim]
# output = self.fc_out(encoder_output) # [batch_size, output_dim]
        # fc output
output = self.fc_out(encoder_output) # [batch_size, src_len, output_dim]
if self.use_crf:
# crf
output = self.crf.decode(output)
return output
def log_likelihood(self, source, target):
'''
:param source: [batch_size, src_len]
:param target: [batch_size, src_len]
:return:
'''
# token, position embedding
tok_embedded = self.tok_embedding(source) # [batch_size, src_len, emb_dim]
        # build the position tensor -> [batch_size, src_len]; position indices run from 0 to src_len-1
position = torch.arange(0, source.shape[1]).unsqueeze(0).repeat(source.shape[0], 1).to(DEVICE)
pos_embedded = self.pos_embedding(position.long()) # [batch_size, src_len, emb_dim]
# token embedded + pos_embedded
embedded = self.dropout(tok_embedded + pos_embedded) # [batch_size, src_len, emb_dim]
encoder_output = embedded
# encoder block
for i, encoder in enumerate(self.encoder):
# encoding
conved, encoder_output = encoder(self.dropout(encoder_output)) # [batch_size, src_len, emb_dim]
# pooling, predict class of the entire sentence
# encoder_output = F.avg_pool1d(encoder_output.permute(0, 2, 1), encoder_output.shape[1]).squeeze(2) # [batch_size, emb_dim]
# output = self.fc_out(encoder_output) # [batch_size, output_dim]
# sequence labeling
outputs = self.fc_out(encoder_output) # [batch_size, src_len, output_dim]
return -self.crf(outputs, target) | PypiClean |
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/resolve/test/pathfilter.js | var path = require('path');
var test = require('tape');
var resolve = require('../');
var resolverDir = path.join(__dirname, '/pathfilter/deep_ref');
var pathFilterFactory = function (t) {
return function (pkg, x, remainder) {
t.equal(pkg.version, '1.2.3');
t.equal(x, path.join(resolverDir, 'node_modules/deep/ref'));
t.equal(remainder, 'ref');
return 'alt';
};
};
test('#62: deep module references and the pathFilter', function (t) {
t.test('deep/ref.js', function (st) {
st.plan(3);
resolve('deep/ref', { basedir: resolverDir }, function (err, res, pkg) {
if (err) st.fail(err);
st.equal(pkg.version, '1.2.3');
st.equal(res, path.join(resolverDir, 'node_modules/deep/ref.js'));
});
var res = resolve.sync('deep/ref', { basedir: resolverDir });
st.equal(res, path.join(resolverDir, 'node_modules/deep/ref.js'));
});
t.test('deep/deeper/ref', function (st) {
st.plan(4);
resolve(
'deep/deeper/ref',
{ basedir: resolverDir },
function (err, res, pkg) {
                if (err) st.fail(err);
st.notEqual(pkg, undefined);
st.equal(pkg.version, '1.2.3');
st.equal(res, path.join(resolverDir, 'node_modules/deep/deeper/ref.js'));
}
);
var res = resolve.sync(
'deep/deeper/ref',
{ basedir: resolverDir }
);
st.equal(res, path.join(resolverDir, 'node_modules/deep/deeper/ref.js'));
});
t.test('deep/ref alt', function (st) {
st.plan(8);
var pathFilter = pathFilterFactory(st);
var res = resolve.sync(
'deep/ref',
{ basedir: resolverDir, pathFilter: pathFilter }
);
st.equal(res, path.join(resolverDir, 'node_modules/deep/alt.js'));
resolve(
'deep/ref',
{ basedir: resolverDir, pathFilter: pathFilter },
function (err, res, pkg) {
if (err) st.fail(err);
st.equal(res, path.join(resolverDir, 'node_modules/deep/alt.js'));
st.end();
}
);
});
t.end();
}); | PypiClean |
/EMalign-1.0.5.tar.gz/EMalign-1.0.5/README.md | <h1>emalign</h1>
An algorithm for aligning rotation, reflection, and translation between volumes.
Current version: 1.0.5
Project's homepage: https://github.com/ShkolniskyLab/emalign
Date: 08/2023
Please cite the following paper when using this package:
Harpaz, Y., & Shkolnisky, Y. (2023). Three-dimensional alignment of density maps in cryo-electron microscopy. Biological Imaging, 3, E8. doi:10.1017/S2633903X23000089
<h2>Recommended Environments:</h2>
The package has been tested on Ubuntu 18.04 and Windows 10. It should probably work on other versions of Windows and Linux, but has not been tested on them yet. Similarly for macOS.
* Python 3.6.0+ is required.
* The package makes use of the pyfftw package, which in turn uses the FFTW library. Before installing emalign make sure you have the FFTW library installed on your system: http://www.fftw.org/fftw3_doc/Installation-and-Customization.html#Installation-and-Customization
<h2>Install emalign</h2>
<h3>Install emalign via pip:</h3>
We recommend installing emalign via pip:
$ pip install emalign
<h3>Install emalign from source</h3>
The tarball of the source tree is available via `pip download emalign`. You can install emalign from the tarball:
$ pip install emalign-x.x.x.tar.gz
You can also install the development version of emalign from a cloned Git repository:
$ git clone https://github.com/ShkolniskyLab/emalign.git
$ cd emalign
$ pip install .
<h2>Uninstall emalign</h2>
Use pip to uninstall emalign:
$ pip uninstall emalign
<h2>Upgrade emalign</h2>
Just use pip with -U option:
$ pip install -U emalign
<h2>Getting started:</h2>
Please read the user manual for usage instructions, available at the homepage of the project on Github: https://github.com/ShkolniskyLab/emalign
<h2>Basic usage:</h2>
Generate test data via
$ emalign --make-test-data -v
This will download EMD-2660 from EMDB (https://www.ebi.ac.uk/emdb/), downsample it to size 129 pixels (with pixel size
3.74A), and save the downsampled map into map_ref_2660.mrc. The function then rotates and shifts the map (see log messages
for the exact transformation parameters) and saves the transformed map to map_transformed_2660.mrc. These two maps can
be used to test the alignment algorithm.
Run the alignment algorithm via
$ emalign -v1 ./map_ref_2660.mrc -v2 ./map_transformed_2660.mrc -o ./map_aligned_2660.mrc -v
The algorithm will align v2 to v1, saving the aligned map.
Type
```
$ emalign -h
```
for help.
| PypiClean |
/Makechr-1.5.tar.gz/Makechr-1.5/makechr/object_file_writer.py | import gen.valiant_pb2 as valiant
import StringIO
MAGIC_NUM = 7210303610482106886
class DataInfo(object):
def __init__(self):
self.clear()
def clear(self):
self.name = None
self.align = None
self.size = None
self.order = None
self.null_value = None
self.is_condensable = False
def empty(self):
return self.name is None
class ObjectFileWriter(object):
"""Creates a valiant object file from ppu memory.
A valiant object file is a structured binary file, built using protocol
buffers. It can hold multiple types of graphical data, such as nametable,
chr, palette, attributes, in a single file, along with some metadata. See
valiant.proto for more information.
"""
def __init__(self):
self.file_obj = valiant.ObjectFile()
self.file_obj.magic1 = MAGIC_NUM % 100
self.file_obj.magic2 = MAGIC_NUM / 100
self.obj_body = self.file_obj.body
self.buffer = None
self.info = DataInfo()
self.component_req = {}
def get_writable(self, name, is_condensable):
if not self.info.empty():
self.add_component(self.buffer.getvalue(), self.info)
self.buffer = StringIO.StringIO()
self.info.clear()
self.info.name = name
self.info.is_condensable = is_condensable
return self.buffer
def close(self):
if not self.info.empty():
self.add_component(self.buffer.getvalue(), self.info)
self.info.clear()
def configure(self, null_value=None, size=None, order=None, align=None,
extract=None):
self.info.null_value = null_value
self.info.size = size
self.info.order = order
self.info.align = align
self.component_req[self.info.name] = extract
def write_module(self, module_name):
"""Write the module name to the valiant object."""
self.file_obj.header.module = module_name
def write_bg_color(self, bg_color):
"""Write the bg_color metadata to the valiant object."""
settings = self.obj_body.settings
settings.bg_color = bg_color
def write_chr_info(self, chr_data):
"""Write size of chr data."""
chr_metadata = self._get_chr_metadata()
chr_metadata.size = chr_data.size()
def write_extra_settings(self, config):
"""Write extra settings to the valiant object."""
if (config.traversal == 'horizontal' and not config.chr_order and
not config.palette_order and not config.is_locked_tiles):
return
chr_metadata = self._get_chr_metadata()
if config.chr_order:
chr_metadata.order = config.chr_order
if config.is_locked_tiles:
chr_metadata.is_locked_tiles = config.is_locked_tiles
if config.traversal and config.traversal != 'horizontal':
chr_metadata.traversal = config.traversal
if config.is_sprite:
palette_metadata = self._get_palette_metadata()
palette_metadata.order = 1
def add_component(self, bytes, info):
pad_size = info.size - len(bytes) if (info.size is not None) else None
if info.is_condensable:
pre_pad, padding, bytes = self._condense(bytes, info.align, pad_size)
else:
pre_pad = padding = 0
role = valiant.DataRole.Value(self._strip_num_suffix(info.name.upper()))
binary = valiant.DirectBinary()
binary.bin = bytes
if info.null_value:
binary.null_value = info.null_value
if pre_pad is not None:
binary.pre_pad = pre_pad
if padding is not None:
binary.padding = padding
packet = self.obj_body.packets.add()
packet.role = role
packet.binary.CopyFrom(binary)
def _strip_num_suffix(self, text):
while text and text[-1].isdigit():
text = text[:-1]
return text
# TODO: Test.
def _condense(self, bytes, align, extra_padding):
if not align:
align = 1
size = len(bytes)
first = bytes[0]
first_width = next((i for i,n in enumerate(bytes) if first != n), size)
first_width = first_width / align * align
if first_width <= align:
first_width = 0
last = bytes[size - 1]
last_width = next((i for i,n in enumerate(reversed(bytes)) if last != n), 0)
last_width = last_width / align * align
if last_width <= align:
last_width = 0
bytes = bytes[first_width:size - last_width]
if not first_width:
first_width = None
# TODO: Need to check null_value.
if extra_padding:
last_width += extra_padding
if not last_width:
last_width = None
return (first_width, last_width, bytes)
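  # Illustrative behaviour of _condense (values made up, assuming align=1 and no
  # extra padding): leading/trailing runs of identical bytes become pre/post pad
  # counts and only the middle is kept:
  #
  #   self._condense('\x00\x00\x00\x05\x07\xff\xff\xff\xff', 1, None)
  #   # -> (3, 4, '\x05\x07')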
def _get_chr_metadata(self):
for packet in self.obj_body.packets:
if packet.role == valiant.CHR:
return packet.metadata.chr_metadata
raise RuntimeError('Could not find CHR packet')
def _get_palette_metadata(self):
for packet in self.obj_body.packets:
if packet.role == valiant.PALETTE:
return packet.metadata.palette_metadata
raise RuntimeError('Could not find PALETTE packet')
def save(self, filename):
serialized = self.file_obj.SerializeToString()
fp = open(filename, 'wb')
fp.write(serialized)
fp.close() | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/extensions/Safe.js | (function(d,c){var f="2.7.9";var a=MathJax.Hub.CombineConfig("Safe",{allow:{URLs:"safe",classes:"safe",cssIDs:"safe",styles:"safe",fontsize:"all",require:"safe"},sizeMin:0.7,sizeMax:1.44,lengthMax:3,safeProtocols:{http:true,https:true,file:true,javascript:false},safeStyles:{color:true,backgroundColor:true,border:true,cursor:true,margin:true,padding:true,textShadow:true,fontFamily:true,fontSize:true,fontStyle:true,fontWeight:true,opacity:true,outline:true},safeRequire:{action:true,amscd:true,amsmath:true,amssymbols:true,autobold:false,"autoload-all":false,bbox:true,begingroup:true,boldsymbol:true,cancel:true,color:true,enclose:true,extpfeil:true,HTML:true,mathchoice:true,mhchem:true,newcommand:true,noErrors:false,noUndefined:false,unicode:true,verb:true},styleParts:{border:true,padding:true,margin:true,outline:true},styleLengths:{borderTop:"borderTopWidth",borderRight:"borderRightWidth",borderBottom:"borderBottomWidth",borderLeft:"borderLeftWidth",paddingTop:true,paddingRight:true,paddingBottom:true,paddingLeft:true,marginTop:true,marginRight:true,marginBottom:true,marginLeft:true,outlineTop:true,outlineRight:true,outlineBottom:true,outlineLeft:true,fontSize:[0.7,1.44]}});var e=a.allow;if(e.fontsize!=="all"){a.safeStyles.fontSize=false}var b=MathJax.Extension.Safe={version:f,config:a,div1:document.createElement("div"),div2:document.createElement("div"),filter:{href:"filterURL",src:"filterURL",altimg:"filterURL","class":"filterClass",style:"filterStyles",id:"filterID",fontsize:"filterFontSize",mathsize:"filterFontSize",scriptminsize:"filterFontSize",scriptsizemultiplier:"filterSizeMultiplier",scriptlevel:"filterScriptLevel"},filterURL:function(g){var h=(g.match(/^\s*([a-z]+):/i)||[null,""])[1].toLowerCase();if(e.URLs==="none"||(e.URLs!=="all"&&!a.safeProtocols[h])){g=null}return g},filterClass:function(g){if(e.classes==="none"||(e.classes!=="all"&&!g.match(/^MJX-[-a-zA-Z0-9_.]+$/))){g=null}return g},filterID:function(g){if(e.cssIDs==="none"||(e.cssIDs!=="all"&&!g.match(/^MJX-[-a-zA-Z0-9_.]+$/))){g=null}return g},filterStyles:function(l){if(e.styles==="all"){return l}if(e.styles==="none"){return null}try{var k=this.div1.style,j=this.div2.style,m;k.cssText=l;j.cssText="";for(var g in a.safeStyles){if(a.safeStyles.hasOwnProperty(g)){if(a.styleParts[g]){for(var h=0;h<4;h++){var o=g+["Top","Right","Bottom","Left"][h];m=this.filterStyle(o,k);if(m){j[o]=m}}}else{m=this.filterStyle(g,k);if(m){j[g]=m}}}}l=j.cssText}catch(n){l=null}return l},filterStyle:function(g,h){var i=h[g];if(typeof i!=="string"||i===""){return null}if(i.match(/^\s*expression/)){return null}if(i.match(/javascript:/)){return null}var j=g.replace(/Top|Right|Left|Bottom/,"");if(!a.safeStyles[g]&&!a.safeStyles[j]){return null}if(!a.styleLengths[g]){return i}return(this.filterStyleLength(g,i,h)?i:null)},filterStyleLength:function(g,i,h){if(typeof a.styleLengths[g]==="string"){i=h[a.styleLengths[g]]}i=this.length2em(i);if(i==null){return false}var j=[-a.lengthMax,a.lengthMax];if(MathJax.Object.isArray(a.styleLengths[g])){j=a.styleLengths[g]}return(i>=j[0]&&i<=j[1])},unit2em:{em:1,ex:0.5,ch:0.5,rem:1,px:1/16,mm:96/25.4/16,cm:96/2.54/16,"in":96/16,pt:96/72/16,pc:96/6/16},length2em:function(h){var g=h.match(/(.+)(em|ex|ch|rem|px|mm|cm|in|pt|pc)/);if(!g){return null}return parseFloat(g[1])*this.unit2em[g[2]]},filterSize:function(g){if(e.fontsize==="none"){return null}if(e.fontsize!=="all"){g=Math.min(Math.max(g,a.sizeMin),a.sizeMax)}return g},filterFontSize:function(g){return(e.fontsize==="all"?g:null)},filterSizeMultiplier:function(g){if(e.fontsize==="none"){g=null}else{if(e.fontsize!=="all"){g=Math.min(1,Math.max(0.6,g)).toString()}}return g},filterScriptLevel:function(g){if(e.fontsize==="none"){g=null}else{if(e.fontsize!=="all"){g=Math.max(0,g).toString()}}return g},filterRequire:function(g){if(e.require==="none"||(e.require!=="all"&&!a.safeRequire[g.toLowerCase()])){g=null}return g}};d.Register.StartupHook("TeX HTML Ready",function(){var g=MathJax.InputJax.TeX;g.Parse.Augment({HREF_attribute:function(j){var i=b.filterURL(this.GetArgument(j)),h=this.GetArgumentMML(j);if(i){h.With({href:i})}this.Push(h)},CLASS_attribute:function(i){var j=b.filterClass(this.GetArgument(i)),h=this.GetArgumentMML(i);if(j){if(h["class"]!=null){j=h["class"]+" "+j}h.With({"class":j})}this.Push(h)},STYLE_attribute:function(i){var j=b.filterStyles(this.GetArgument(i)),h=this.GetArgumentMML(i);if(j){if(h.style!=null){if(j.charAt(j.length-1)!==";"){j+=";"}j=h.style+" "+j}h.With({style:j})}this.Push(h)},ID_attribute:function(j){var i=b.filterID(this.GetArgument(j)),h=this.GetArgumentMML(j);if(i){h.With({id:i})}this.Push(h)}})});d.Register.StartupHook("TeX Jax Ready",function(){var i=MathJax.InputJax.TeX,h=i.Parse,g=b.filter;h.Augment({Require:function(j){var k=this.GetArgument(j).replace(/.*\//,"").replace(/[^a-z0-9_.-]/ig,"");k=b.filterRequire(k);if(k){this.Extension(null,k)}},MmlFilterAttribute:function(j,k){if(g[j]){k=b[g[j]](k)}return k},SetSize:function(j,k){k=b.filterSize(k);if(k){this.stack.env.size=k;this.Push(i.Stack.Item.style().With({styles:{mathsize:k+"em"}}))}}})});d.Register.StartupHook("TeX bbox Ready",function(){var g=MathJax.InputJax.TeX;g.Parse.Augment({BBoxStyle:function(h){return b.filterStyles(h)},BBoxPadding:function(i){var h=b.filterStyles("padding: "+i);return(h?i:0)}})});d.Register.StartupHook("MathML Jax Ready",function(){var h=MathJax.InputJax.MathML.Parse,g=b.filter;h.Augment({filterAttribute:function(i,j){if(g[i]){j=b[g[i]](j)}return j}})});d.Startup.signal.Post("Safe Extension Ready");c.loadComplete("[MathJax]/extensions/Safe.js")})(MathJax.Hub,MathJax.Ajax);
/MeUtils-2023.8.29.13.9.44-py3-none-any.whl/meutils/np_utils.py |
from itertools import combinations
from collections import defaultdict
# ME
from meutils.pipe import *
# grouping
# np.array_split(range(6), 3)
# iteration_utilities.split
# iteration_utilities.grouper([1,2,3,4], 2) | xlist
# flattening
"""
l=[[1,2,3],[4,[5],[6,7]],[8,[9,[10]]]]*1000
from iteration_utilities import deepflatten
_ = list(deepflatten(l))  # ~10x faster
_ = sum(l, [])
"""
def normalize(x: np.ndarray) -> np.ndarray:
"""clip
x /= np.clip(np.linalg.norm(x, axis=-1, keepdims=True), 1e-12, None)
"""
return x / np.linalg.norm(x, axis=-1, keepdims=True)
def cosine(v1, v2):  # similarity, not distance
"""
v1 = np.array([[1, 2], [3, 4]])
v2 = np.array([[5, 6], [7, 8], [5, 6]])
cosine_dist(v1, v2)
"""
assert v1.ndim == v2.ndim
if v1.ndim == 1:
v1, v2 = v1.reshape(1, -1), v2.reshape(1, -1)
from sklearn.metrics.pairwise import cosine_similarity
return cosine_similarity(v1, v2).clip(0, 1)
def cosine_topk(v1, v2, topk=10):  # similarity, not distance
dist = - cosine(v1, v2)
idxs = np.argsort(dist)[:, :topk]
    scores = - np.take_along_axis(dist, idxs, -1)  # gather the scores
return idxs[0], scores[0]
def cosine_similarity(v1, v2):
"""先归一化再点乘,比from sklearn.metrics.pairwise import cosine_similarity 快10倍"""
v1, v2 = map(lambda v: v / np.linalg.norm(np.atleast_2d(v), axis=1, keepdims=True), [v1, v2])
return v1 @ v2.T
def similarity_search_by_vector(v1, v2, topk=10):
"""
:param v1:
:param v2:
:param topk:
:return: idxs, scores
"""
dist = - cosine_similarity(v1, v2)
idxs = np.argsort(dist)[:, :topk]
    scores = - np.take_along_axis(dist, idxs, -1)  # gather the scores
return idxs, scores
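# Illustrative call (shapes made up; not executed here):
#
#   q, db = np.random.rand(2, 8), np.random.rand(100, 8)
#   idxs, scores = similarity_search_by_vector(q, db, topk=5)
#   # idxs.shape == (2, 5); scores hold the matching similarities, descending per row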
def cooccurrence_matrix(texts, window_size=2):
"""
    Build a word co-occurrence matrix.
    :param texts: list of tokenized texts
    :param window_size: maximum distance between co-occurring words
    :return: co-occurrence matrix as a pandas DataFrame
    data_list = [
    ['I', 'like', 'learning', 'like'],
    ['I', 'like', 'playing'],
]
print(cooccurrence_matrix(data_list, 4))
"""
    # count word occurrences
word_counts = defaultdict(int)
for text in texts:
for word in text:
word_counts[word] += 1
    # build the word-to-id mapping
word_to_id = {word: i for i, word in enumerate(word_counts)}
i2w = {i: w for w, i in word_to_id.items()}
    # initialize the co-occurrence matrix
matrix = np.zeros((len(word_counts), len(word_counts)))
    # count co-occurrences within the window
for text in tqdm(texts):
for i, j in combinations(range(len(text)), 2):
if abs(i - j) <= window_size:
word_i, word_j = text[i], text[j]
if word_i in word_to_id and word_j in word_to_id:
matrix[word_to_id[word_i], word_to_id[word_j]] += 1
matrix[word_to_id[word_j], word_to_id[word_i]] += 1
index = list(i2w.values())
columns = list(i2w.values())
return pd.DataFrame(matrix, index, columns)
if __name__ == "__main__":
import time
from sklearn.metrics.pairwise import cosine_similarity
x = np.random.rand(10, 128)
y = np.random.rand(1000000, 128)
idxs, scores = cosine_topk(x[:1], x)
print(idxs)
print(similarity_search_by_vector(x, y, 3)[0]) | PypiClean |
/Nbdler-3.0.3.tar.gz/Nbdler-3.0.3/nbdler/download.py | from concurrent.futures.thread import ThreadPoolExecutor
from nbdler.handler import (
SpeedAdjuster,
AIOReaderWriter,
BlockSlicer,
FileTempData,
ClientWorker,
URIStatusManager,
GatherException,
h, Handlers)
from .client import get_policy, ClientPolicy
from .version import VERSION
from .utils import forever_loop_in_executor
from traceback import format_exc
import weakref
import warnings
import asyncio
import os
__all__ = (
'Downloader',
)
class DownloadConfigure:
ADJUSTABLE = frozenset(
{'max_concurrent', 'max_speed', 'buffer_size', 'timeout', 'interval', 'client_policy'})
def __init__(self, resume_capability, max_concurrent, chunk_size, buffer_size, timeout=10,
max_speed=None, downloading_ext='.downloading', interval=0.5, client_policy=None, **kwargs):
self.version = VERSION
self.resume_capability = resume_capability
self.max_concurrent = max_concurrent
self.chunk_size = chunk_size
self.buffer_size = buffer_size
self.timeout = timeout
self.interval = interval
self.max_speed = max_speed
self.downloading_ext = downloading_ext
self.client_policy = client_policy
self.kwargs = kwargs
def set(self, **kwargs):
""" 设置配置。
Args:
**kwargs:
max_concurrent: 最大并发数
max_speed: 最大速度限制
buffer_size: 最大文件缓冲大小
timeout: 客户端连接接收超时时间
interval: 速度调节间隙
client_policy: 客户端处理策略
"""
attrs = set(kwargs).intersection(DownloadConfigure.ADJUSTABLE)
for attr in attrs:
self.__setattr__(attr, kwargs[attr])
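    # For example (illustrative values): config.set(max_speed=512 * 1024, timeout=15)
    # applies both keys, while any keyword not listed in ADJUSTABLE is silently ignored.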
def dumps(self):
opts = dict(self.__dict__)
client_policy = self.client_policy
opts['client_policy'] = dict(client_policy)
opts.update(opts.pop('kwargs'))
return opts
@classmethod
def loads(cls, dumpy):
config = cls(**dumpy)
if not isinstance(config.client_policy, ClientPolicy):
config.client_policy = get_policy(**config.client_policy)
return config
def __repr__(self):
return (f'<DownloadConfigure version={self.version} max_concurrent={self.max_concurrent} '
f'resume_capability={self.resume_capability}>')
class Downloader:
def __init__(self, file, uris, block_grp, *, handlers=None, **kwargs):
self.file = file
self.uris = uris
self.block_grp = block_grp
self.config = DownloadConfigure.loads(kwargs)
self._executor = None
self._loop = None
self._future = None
self._closed = False
self._handlers = Handlers()
if handlers is None:
handlers = []
buildin_handlers = [
ClientWorker,
SpeedAdjuster,
FileTempData,
AIOReaderWriter,
BlockSlicer,
GatherException,
URIStatusManager,
]
handlers.extend(buildin_handlers)
for handler in handlers:
if handler.name in self._handlers:
continue
if isinstance(handler, type):
handler = handler()
handler.add_parent(weakref.proxy(self))
self._handlers[handler.name] = handler
def exceptions(self, exception_type=None, *, just_new_exception=True):
""" 线程安全获取异常
以生成器的形式获取内部发生的异常,当下载任务暂停或者完成后将中断生成器的迭代。
使用方式:
for exception in dl.exceptions():
do_some_works(exception)
Args:
exception_type: 指定异常类型,可选ClientError、HandlerError。默认None则获取所有异常。
just_new_exception: 是否忽略当前时间前的旧异常,仅返回之后的新异常。
Yields:
内部出现的client或handler异常对象。
"""
yield from self._handlers.exception.acquire_threadsafe(
exception_type, just_new_exception=just_new_exception)
def aexceptions(self, exception_type=None, *, just_new_exception=True):
""" 异步返回异常错误。 具体参见exceptions()方法。
使用方式:
async for exception in dl.aexceptions():
do_some_works(exception)
"""
return self._handlers.exception.acquire(
exception_type, just_new_exception=just_new_exception)
async def astart(self):
""" 在当前事件循环中运行下载器。"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
loop = asyncio.get_running_loop()
self._loop = loop
if self.block_grp.is_done_finished():
raise RuntimeError('download is completed.')
self._future = loop.create_future()
async def handler_worker(hd):
try:
return await hd.start()
except BaseException as err:
h.exception.handler_error(err)
self.pause(0)
with h.enter(self._handlers, loop):
self.block_grp.activate()
# prepare()
await self._handlers.prepare()
# start()
result = await asyncio.gather(
*[handler_worker(handler) for handler in h.iter_all()]
)
# join()
await self._handlers.join()
self.block_grp.deactivate()
self._future.set_result(result)
def start(self, *, loop=None):
""" 在指定的循环中运行下载器。
若loop=None不指定事件循环,那么将创建新的线程作为下载器的事件循环。
Args:
loop: 指定事件循环运行下载器
Returns:
返回下载器运行的concurrent.future.Future对象
"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
if self.block_grp.is_finished():
raise RuntimeError('download is already finished.')
if self._loop is not None:
loop = self._loop
if loop is None:
def cb(f):
nonlocal executor
executor.shutdown(False)
executor = ThreadPoolExecutor(
max_workers=1, thread_name_prefix=f'Downloader {self.file.name} {self.file.size}')
exec_fut = forever_loop_in_executor(executor)
exec_fut.add_done_callback(cb)
self._executor = executor
loop = exec_fut.get_loop()
fut = asyncio.run_coroutine_threadsafe(self.astart(), loop=loop)
self._loop = loop
return fut
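    # Typical blocking use from another thread (illustrative; constructing the
    # `dl` instance requires file/uris/block_grp objects from this package):
    #
    #   fut = dl.start()   # spawns a worker thread with its own event loop
    #   dl.join()          # wait until the download stops
    #   dl.close()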
async def apause(self):
""" 异步暂停等待。"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
result = await self._await_loopsafe(self._handlers.pause())
await self.ajoin()
return result
async def aclose(self):
""" 异步关闭下载器。"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
if not self._future.done():
raise RuntimeError('cannot close a running Downloader.')
result = await self._await_loopsafe(self._handlers.close())
await self.ajoin()
self._closed = True
if self._executor:
self._loop.call_soon_threadsafe(self._loop.stop)
        # if the download is complete, strip the .downloading suffix
if self.block_grp.is_done_finished():
file = self.file
filepath = f'{file.pathname}{self.config.downloading_ext}'
start_filepath = file.pathname
target_filepath = start_filepath
postfix = 0
while True:
try:
os.rename(filepath, target_filepath)
except FileExistsError:
postfix += 1
target_filepath = os.path.join(file.path, file.number_name(postfix))
else:
if postfix != 0:
file.name = file.number_name(postfix)
break
        # delete the download configuration file
os.unlink(f'{start_filepath}{self.config.downloading_ext}.cfg')
return result
async def ajoin(self):
""" 异步等待下载器结束。"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
return await self._await_loopsafe(self._future)
async def _await_loopsafe(self, *coros_or_futures):
""" 事件循环安全的异步等待。
Args:
*coros_or_futures: coroutine或future对象列表。
Returns:
返回coros_or_futures的返回结果列表。
"""
current_loop = asyncio.get_running_loop()
loop = self._loop
if loop is None:
loop = current_loop
async def _execute_loop():
with h.enter(self._handlers):
r = await asyncio.gather(*coros_or_futures)
return r
fut = asyncio.run_coroutine_threadsafe(_execute_loop(), loop)
result = await asyncio.wrap_future(fut)
return result
def _call_threadsafe(self, coroutine, timeout=None):
""" 下载器的异步操作线程安全化。
Args:
coroutine: 异步操作协程
timeout: 超时等待事件
Returns:
当timeout=0时,返回concurrent.future.Future对象,
否则,协程coroutine的执行结果或抛出超时异常。
"""
loop = self._loop
assert loop
future = asyncio.run_coroutine_threadsafe(coroutine, loop)
if timeout == 0:
return future
return future.result(timeout)
def pause(self, timeout=None):
""" 线程安全暂停下载器。具体参见apause方法"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
return self._call_threadsafe(self.apause(), timeout=timeout)
def close(self, timeout=None):
""" 线程安全关闭下载器。具体参见aclose方法"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
return self._call_threadsafe(self.aclose(), timeout=timeout)
def join(self, timeout=None):
""" 线程安全等待下载器。具体参见ajoin方法"""
if self._closed:
raise RuntimeError('Downloader is already closed.')
return self._call_threadsafe(self.ajoin(), timeout=timeout)
def dumps(self):
dumpy = {
'config': self.config.dumps(),
'file': self.file.dumps(),
'uris': self.uris.dumps(),
'block_grp': self.block_grp.dumps(),
}
return dumpy
@classmethod
def loads(cls, dumpy, handlers=None):
from nbdler.uri import URIs
from nbdler.file import File
from nbdler.block import BlockGroup
uris = URIs.loads(dumpy['uris'])
file = File(**dumpy['file'])
block_grp = BlockGroup.loads(dumpy['block_grp'])
return cls(file, uris, block_grp, handlers=handlers, **dumpy['config'])
transfer_rate = property(lambda self: self.block_grp.transfer_rate)
average_speed = property(lambda self: self.block_grp.average_speed)
walk_length = property(lambda self: self.block_grp.walk_length)
done_length = property(lambda self: self.block_grp.done_length)
remaining_length = property(lambda self: self.block_grp.remaining_length)
remaining_time = property(lambda self: self.block_grp.remaining_time)
percent_complete = property(lambda self: self.block_grp.percent_complete)
is_walk_finished = property(lambda self: self.block_grp.is_walk_finished)
is_done_finished = property(lambda self: self.block_grp.is_done_finished)
def is_finished(self):
""" 返回文件是否下载完毕。"""
return self.block_grp.is_finished() and (not self._future or self._future.done())
def set_config(self, **kwargs):
""" 配置下载器。参见DownloadConfigure.set()方法。"""
self.config.set(**kwargs)
def __repr__(self):
running = False
if self._future is not None and not self._future.done():
running = True
return f'<Downloader filename={self.file.name} running={running} closed={self._closed}>'
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
return await self.aclose()
def __del__(self, _warnings=warnings):
if not self._closed:
self.close() | PypiClean |
/MapProxy-1.16.0.tar.gz/MapProxy-1.16.0/mapproxy/response.py | import hashlib
from mapproxy.util.times import format_httpdate, parse_httpdate, timestamp
from mapproxy.compat import PY2, text_type, iteritems
class Response(object):
charset = 'utf-8'
default_content_type = 'text/plain'
block_size = 1024 * 32
def __init__(self, response, status=None, content_type=None, mimetype=None):
self.response = response
if status is None:
status = 200
self.status = status
self._timestamp = None
self.headers = {}
if mimetype:
if mimetype.startswith('text/'):
content_type = mimetype + '; charset=' + self.charset
else:
content_type = mimetype
if content_type is None:
content_type = self.default_content_type
self.headers['Content-type'] = content_type
if content_type.startswith(('text/', 'application/')):
# Capability documents can be dependent on the value of a few X-headers.
            # Tell caching proxies this via the Vary HTTP header. This also prevents
# malicious cache poisoning.
self.headers['Vary'] = 'X-Script-Name, X-Forwarded-Host, X-Forwarded-Proto'
def _status_set(self, status):
if isinstance(status, int):
status = status_code(status)
self._status = status
def _status_get(self):
return self._status
status = property(_status_get, _status_set)
def _last_modified_set(self, date):
if not date: return
self._timestamp = timestamp(date)
self.headers['Last-modified'] = format_httpdate(self._timestamp)
def _last_modified_get(self):
return self.headers.get('Last-modified', None)
last_modified = property(_last_modified_get, _last_modified_set)
def _etag_set(self, value):
self.headers['ETag'] = value
def _etag_get(self):
return self.headers.get('ETag', None)
etag = property(_etag_get, _etag_set)
def cache_headers(self, timestamp=None, etag_data=None, max_age=None, no_cache=False):
"""
Set cache-related headers.
:param timestamp: local timestamp of the last modification of the
response content
:param etag_data: list that will be used to build an ETag hash.
calls the str function on each item.
:param max_age: the maximum cache age in seconds
"""
if etag_data:
hash_src = ''.join((str(x) for x in etag_data)).encode('ascii')
self.etag = hashlib.md5(hash_src).hexdigest()
if no_cache:
assert not timestamp and not max_age
self.headers['Cache-Control'] = 'no-cache, no-store'
self.headers['Pragma'] = 'no-cache'
self.headers['Expires'] = '-1'
self.last_modified = timestamp
if (timestamp or etag_data) and max_age is not None:
self.headers['Cache-control'] = 'public, max-age=%d, s-maxage=%d' % (max_age, max_age)
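    # e.g. (illustrative values):
    #   resp.cache_headers(timestamp=last_change, etag_data=[path, mtime], max_age=3600)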
def make_conditional(self, req):
"""
Make the response conditional to the HTTP headers in the CGI/WSGI `environ`.
Checks for ``If-none-match`` and ``If-modified-since`` headers and compares
to the etag and timestamp of this response. If the content was not modified
        the response will be changed to HTTP 304 Not Modified.
"""
if req is None:
return
environ = req.environ
not_modified = False
if self.etag == environ.get('HTTP_IF_NONE_MATCH', -1):
not_modified = True
elif self._timestamp is not None:
date = environ.get('HTTP_IF_MODIFIED_SINCE', None)
timestamp = parse_httpdate(date)
if timestamp is not None and self._timestamp <= timestamp:
not_modified = True
if not_modified:
self.status = 304
self.response = []
if 'Content-type' in self.headers:
del self.headers['Content-type']
@property
def content_length(self):
return int(self.headers.get('Content-length', 0))
@property
def content_type(self):
return self.headers['Content-type']
@property
def data(self):
if hasattr(self.response, 'read'):
return self.response.read()
else:
return b''.join(chunk.encode() for chunk in self.response)
@property
def fixed_headers(self):
headers = []
for key, value in iteritems(self.headers):
if type(value) != text_type:
# for str subclasses like ImageFormat
value = str(value)
if PY2 and isinstance(value, unicode):
value = value.encode('utf-8')
headers.append((key, value))
return headers
def __call__(self, environ, start_response):
if hasattr(self.response, 'read'):
if ((not hasattr(self.response, 'ok_to_seek') or
self.response.ok_to_seek) and
(hasattr(self.response, 'seek') and
hasattr(self.response, 'tell'))):
self.response.seek(0, 2) # to EOF
self.headers['Content-length'] = str(self.response.tell())
self.response.seek(0)
if 'wsgi.file_wrapper' in environ:
resp_iter = environ['wsgi.file_wrapper'](self.response, self.block_size)
else:
resp_iter = iter(lambda: self.response.read(self.block_size), b'')
elif not self.response:
resp_iter = iter([])
elif isinstance(self.response, text_type):
self.response = self.response.encode(self.charset)
self.headers['Content-length'] = str(len(self.response))
resp_iter = iter([self.response])
elif isinstance(self.response, bytes):
self.headers['Content-length'] = str(len(self.response))
resp_iter = iter([self.response])
else:
resp_iter = self.response
start_response(self.status, self.fixed_headers)
return resp_iter
def iter_encode(self, chunks):
for chunk in chunks:
if isinstance(chunk, text_type):
chunk = chunk.encode(self.charset)
yield chunk
# http://www.faqs.org/rfcs/rfc2616.html
_status_codes = {
100: 'Continue',
101: 'Switching Protocols',
200: 'OK',
201: 'Created',
202: 'Accepted',
203: 'Non-Authoritative Information',
204: 'No Content',
205: 'Reset Content',
206: 'Partial Content',
300: 'Multiple Choices',
301: 'Moved Permanently',
302: 'Found',
303: 'See Other',
304: 'Not Modified',
305: 'Use Proxy',
307: 'Temporary Redirect',
400: 'Bad Request',
401: 'Unauthorized',
402: 'Payment Required',
403: 'Forbidden',
404: 'Not Found',
405: 'Method Not Allowed',
406: 'Not Acceptable',
407: 'Proxy Authentication Required',
408: 'Request Time-out',
409: 'Conflict',
410: 'Gone',
411: 'Length Required',
412: 'Precondition Failed',
413: 'Request Entity Too Large',
414: 'Request-URI Too Large',
415: 'Unsupported Media Type',
416: 'Requested range not satisfiable',
417: 'Expectation Failed',
500: 'Internal Server Error',
501: 'Not Implemented',
502: 'Bad Gateway',
503: 'Service Unavailable',
504: 'Gateway Time-out',
505: 'HTTP Version not supported',
}
def status_code(code):
return str(code) + ' ' + _status_codes[code] | PypiClean |
/GearMess_client-0.3.2-py3-none-any.whl/client_src/client.py | import socket
from time import sleep
from client_src.JIM.JIMs import Jim, MessageConverter
from client_src.JIM.jim_config import *
from client_src.client_storage_handler import ClientStorageHandler
from client_src.crypto.crypto import *
BUF_SIZE = 1024
ADDR = 'localhost'
PORT = 7777
class Client:
def __init__(self, name, addr='localhost', port=7777):
self.name = name
self.address = addr, port
self.soc = None
        # TODO: generate the salt at registration time and store it in the database or a file
self.salt = b'e690a758702ee2f78e4ae5d1327f52d246c82a6eda3648d25c3806142717e5d3'
        self.message_key = '40tisachobezianvzhopusunulibanan'  # (c) S. Lukyanenko - "Labyrinth of Reflections".
self.message_maker = Jim(self.name)
self.converter = MessageConverter()
self.storage_handler = ClientStorageHandler(self.name)
def connect_to_server(self):
""" Making socket and connecting to server_src"""
self.soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.soc.connect(self.address)
self.soc.settimeout(1)
def send_authorisation(self, password):
""" Sending authorisation-message to server_src, returns received response """
self.connect_to_server()
authorisation_ = self.message_maker.create(action=AUTHORISE)
authorisation = self.converter(authorisation_)
self.soc.send(authorisation)
serv_answ = self.receive()[ALERT]
key = make_password(self.salt, password)
answ = crypt_message(key, serv_answ)
answer = self.message_maker.create(action=AUTHORISE, answer=answ)
answer = self.converter(answer)
self.soc.send(answer)
resp = self.receive()
return resp
def send_registration(self, password):
""" Sending registration-message to server_src, returns received response """
passw = make_password(self.salt, password)
self.connect_to_server()
registration = self.message_maker.create(action=REGISTER, password=passw)
registration = self.converter(registration)
self.soc.send(registration)
resp = self.receive()
return resp
def add_contact(self, contact_name):
""" Sending action-message with contact name to add """
action = self.message_maker.create(action=ADD_CONTACT, contact=contact_name)
action = self.converter(action)
self.soc.send(action)
self.storage_handler.add_contact(contact_name)
def del_contact(self, contact_name):
""" Sending action-message with contact name to delete, returns received response """
action = self.message_maker.create(action=DEL_CONTACT, contact=contact_name)
action = self.converter(action)
self.soc.send(action)
self.storage_handler.del_contact(contact_name)
def get_contacts(self):
""" Sending action-message to get contact-list, printing contacts received from server_src """
action = self.message_maker.create(action=GET_CONTACTS)
action = self.converter(action)
self.soc.send(action)
def quit_server(self):
quit_msg = self.message_maker.create(action=QUIT)
quit_msg = self.converter(quit_msg)
self.soc.send(quit_msg)
sleep(0.2)
self.soc.close()
def send_presence(self):
msg = self.message_maker.create(action=PRESENCE)
msg = self.converter(msg)
self.soc.send(msg)
def receive(self):
""" Receiving message from server, checking for message-action,
if so - returns string in format SENDER'S_NAME: MESSAGE."""
try:
mess = self.soc.recv(BUF_SIZE)
except socket.timeout:
pass
else:
if mess:
dmess = self.converter(mess)
if dmess.get(MESSAGE):
uncry_message = decrypt_message(self.message_key, dmess[MESSAGE])
dmess[MESSAGE] = uncry_message
# self.storage_handler.log_incoming_message(dmess)
return dmess
def send_message(self, message, to):
""" Making dict message-action and sending to server_src
"""
message_ = self.message_maker.create(action=MSG, message=message, to=to)
self.storage_handler.log_outgoing_message(message_)
crypted_message = crypt_message(self.message_key, message)
message_[MESSAGE] = crypted_message
message_to_send = self.converter(message_)
self.soc.send(message_to_send) | PypiClean |
/Copreus-0.4.0.tar.gz/Copreus-0.4.0/copreus/drivermanager/driverfactory.py | import copreus.drivers
import importlib
import pelops.logging.mylogger as mylogger
class DriverFactory:
factory_logger = None
mqtt_client = None
logger = None
spi_lock = None
i2c_lock = None
#driver = None
def __init__(self, mqtt_client=None, logger=None, spi_lock=None, i2c_lock=None):
self.mqtt_client = mqtt_client
self.logger = logger
self.spi_lock = spi_lock
self.i2c_lock = i2c_lock
self.factory_logger = mylogger.get_child(logger, self.__class__.__name__)
def create_drivers(self, config):
"""Static drivers factory - takes a list of driver configs and creates them."""
self.factory_logger.info("creating drivers")
self.factory_logger.debug("driver configs: ".format(config["drivers"]))
drivers = {}
for driver_config in config["drivers"]:
if not driver_config["active"]:
continue
config["driver"] = driver_config
driver = self.create_driver(config)
driver_id = self.get_unique_name(drivers, driver.get_name())
drivers[driver_id] = driver
self.factory_logger.info(" - added driver '[{}]: {}.{}'".format(driver_id, driver._type, driver._name))
self.factory_logger.debug("drivers: {}".format(drivers))
return drivers
@staticmethod
def get_unique_name(drivers, name):
prefix = name.replace(" ", "").lower()
suffix = ""
counter = 0
name = prefix+suffix
while name in drivers:
suffix = "_{}".format(counter)
counter += 1
name = prefix + suffix
return name
def create_driver(self, config):
"""Static driver factory - takes a driver entry from json/yaml config and instantiates the corresponding
Class. Classes that are specializations of ASPI are provided with the spi_lock (if one is provided to this
factory).
        Newly implemented drivers must be added manually."""
type_name = config["driver"]["type"].upper()
        # It is on purpose that class names are not used for the comparison with type_name (as is done within the
        # constructor of the base class ADriver). This approach allows for late binding - thus, a class is imported
        # if and only if it is needed, which results in fewer dependencies that must be fulfilled although they
        # might not be needed.
drivers = copreus.drivers.get_drivers()
try:
driver = drivers[type_name]
mod = importlib.import_module(driver["module"])
klass = getattr(mod, driver["name"])
except:
logger = mylogger.get_child(self.logger, __name__)
logger.error("unknown type name '{}'.".format(type_name))
raise ValueError("unknown type name '{}'.".format(type_name))
if "ASPI" in driver["bases"]:
result = klass(config, self.mqtt_client, self.logger, spi_lock=self.spi_lock, no_gui=True,
manage_monitoring_agent=False)
elif "AI2C" in driver["bases"]:
result = klass(config, self.mqtt_client, self.logger, i2c_lock=self.i2c_lock, no_gui=True,
manage_monitoring_agent=False)
else:
result = klass(config, self.mqtt_client, self.logger, no_gui=True,
manage_monitoring_agent=False)
return result
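    # Illustrative driver entry as consumed above (keys follow the lookups in this
    # method; the values and the `factory` name are made up):
    #
    #   config["driver"] = {"type": "bme_280"}
    #   driver = factory.create_driver(config)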
def reload_driver(self, drivers, driver_id):
"""destroy the driver and creates a new one from scratch"""
self.factory_logger.info("restarting driver '{}' - start".format(driver_id))
driver = drivers[driver_id]
self.factory_logger.info("... {}".format(driver.get_short_info()))
driver.stop()
config = {}
config["driver"] = driver.get_config()
new_driver = self.create_driver(config)
drivers[driver_id] = new_driver
new_driver.start()
self.factory_logger.info("restarting driver {} - done")
@staticmethod
def old_create(config, mqtt_client, logger, spi_lock=None, i2c_lock=None):
type_name = config["type"].upper()
if type_name == "ADC":
from copreus.drivers.adc import ADC
result = ADC(config, mqtt_client, logger, spi_lock, no_gui=True, manage_monitoring_agent=False)
elif type_name == "DAC":
from copreus.drivers.dac import DAC
result = DAC(config, mqtt_client, logger, spi_lock, no_gui=True, manage_monitoring_agent=False)
elif type_name == "BME_280":
from copreus.drivers.bme_280 import BME_280
result = BME_280(config, mqtt_client, logger, i2c_lock, no_gui=True, manage_monitoring_agent=False)
elif type_name == "DHT":
from copreus.drivers.dht import DHT
result = DHT(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "RFID":
from copreus.drivers.rfid import RFID
result = RFID(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "RGBLED":
from copreus.drivers.rgbled import RGBLed
result = RGBLed(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "EPAPERDIRECT":
from copreus.drivers.epaperdirect import EPaperDirect
result = EPaperDirect(config, mqtt_client, logger, spi_lock, no_gui=True, manage_monitoring_agent=False)
elif type_name == "EPAPERSIMPLE":
from copreus.drivers.epapersimple import EPaperSimple
result = EPaperSimple(config, mqtt_client, logger, spi_lock, no_gui=True, manage_monitoring_agent=False)
elif type_name == "INPUT":
from copreus.drivers.input import Input
result = Input(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "POLLINGINPUT":
from copreus.drivers.pollinginput import PollingInput
result = PollingInput(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "OUTPUT":
from copreus.drivers.output import Output
result = Output(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "ROTARYENCODER":
from copreus.drivers.rotaryencoder import RotaryEncoder
result = RotaryEncoder(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
elif type_name == "ROTARYENCODER2":
from copreus.drivers.rotaryencoder2 import RotaryEncoder2
result = RotaryEncoder2(config, mqtt_client, logger, no_gui=True, manage_monitoring_agent=False)
else:
logger = mylogger.get_child(logger, __name__)
logger.error("unknown type name '{}'.".format(type_name))
raise ValueError("unknown type name '{}'.".format(type_name))
return result | PypiClean |
/CANberry-0.4.tar.gz/CANberry-0.4/canberry/utils.py | from __future__ import print_function, absolute_import, division
import os
import math
import time
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import SafeConfigParser as ConfigParser
from .can_utils import Service
__author__ = 'Florian Wilhelm'
__copyright__ = 'Florian Wilhelm'
def list_attributes(obj):
"""
Lists all attributes of an object or class
:param obj: object or class
:return: dictionary of user-defined attributes
"""
return {k: v for k, v in vars(obj).items() if not k.startswith('__')}
def add_timestamp(dct):
"""
    Adds a timestamp attribute in milliseconds to a dictionary
:param dct: dictionary
"""
dct['timestamp'] = time.time() * 1000
def static_vars(**kwargs):
"""
Decorator for adding a static variable to a function
:param kwargs: initializations of the static variables
"""
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
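# Example (illustrative):
#
#   @static_vars(counter=0)
#   def tick():
#       tick.counter += 1
#       return tick.counter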
class DummySensor(object):
"""
A dummy sensor for test purposes
"""
def __init__(self, trans=0., scale=1.):
self.trans = trans
self.scale = scale
self.freq = 1.
def read(self):
response = {
Service.READ_PARAM: (self.scale*math.sin(self.freq*time.time()) +
self.trans),
Service.READ_MIN: -self.scale + self.trans,
Service.READ_MAX: self.scale + self.trans,
Service.READ_DEFAULT: self.trans,
Service.READ_SCALE: 1.}
return response
def set(self, freq):
self.freq = float(freq)
def str2bool(txt):
"""
Convert a string to a boolean
:param txt: string object
:return: boolean
"""
if txt.lower() in ['1', 'true', 'yes', 'y']:
return True
elif txt.lower() in ['0', 'false', 'no', 'n']:
return False
else:
raise ValueError("Can't convert \"{}\" to a boolean".format(txt))
def read_config():
"""
Read the configuration files .canrc, can.conf etc. as defined by python
can in order to retrieve all settings from the section [canberry].
:return: dictionary
"""
from can.util import CONFIG_FILES
config = ConfigParser()
config.read([os.path.expanduser(path) for path in CONFIG_FILES])
if not config.has_section('canberry'):
raise RuntimeError("Please add a section canberry to your CAN config!")
cfg = {key: val for key, val in config.items('canberry')}
# Map configuration values to the right data type and set defaults
cfg['identifier'] = int(cfg.get('identifier', '0'))
cfg['external'] = str2bool(cfg.get('external', 'true'))
cfg['debug'] = str2bool(cfg.get('debug', 'false'))
return cfg | PypiClean |
/Nuitka_fixed-1.1.2-cp310-cp310-win_amd64.whl/nuitka/build/inline_copy/lib/scons-4.4.0/SCons/Tool/clang.py | # Based on SCons/Tool/gcc.py by Paweł Tomulik 2014 as a separate tool.
# Brought into the SCons mainline by Russel Winder 2017.
import os
import re
import subprocess
import SCons.Util
import SCons.Tool.cc
from SCons.Tool.clangCommon import get_clang_install_dirs
from SCons.Tool.MSCommon import msvc_setup_env_once
compilers = ['clang']
def generate(env):
"""Add Builders and construction variables for clang to an Environment."""
SCons.Tool.cc.generate(env)
if env['PLATFORM'] == 'win32':
# Ensure that we have a proper path for clang
clang = SCons.Tool.find_program_path(env, compilers[0],
default_paths=get_clang_install_dirs(env['PLATFORM']))
if clang:
clang_bin_dir = os.path.dirname(clang)
env.AppendENVPath('PATH', clang_bin_dir)
# Set-up ms tools paths
msvc_setup_env_once(env)
env['CC'] = env.Detect(compilers) or 'clang'
if env['PLATFORM'] in ['cygwin', 'win32']:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS')
else:
env['SHCCFLAGS'] = SCons.Util.CLVar('$CCFLAGS -fPIC')
# determine compiler version
if env['CC']:
# pipe = SCons.Action._subproc(env, [env['CC'], '-dumpversion'],
pipe = SCons.Action._subproc(env, [env['CC'], '--version'],
stdin='devnull',
stderr='devnull',
stdout=subprocess.PIPE)
if pipe.wait() != 0: return
# clang -dumpversion is of no use
with pipe.stdout:
line = pipe.stdout.readline()
line = line.decode()
match = re.search(r'clang +version +([0-9]+(?:\.[0-9]+)+)', line)
if match:
env['CCVERSION'] = match.group(1)
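# e.g. a first line of "clang version 14.0.0 (https://llvm.org)"
# sets CCVERSION to '14.0.0'.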
env['CCDEPFLAGS'] = '-MMD -MF ${TARGET}.d'
env["NINJA_DEPFILE_PARSE_FORMAT"] = 'clang'
def exists(env):
return env.Detect(compilers)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4: | PypiClean |
/Cuttlefish-0.3.tar.gz/Cuttlefish-0.3/README.txt | Cuttlefish is an example project using the Bottle flyweight Web
framework and the Mako template engine in Python. It's intended to run
on your desktop and provide a lightweight search engine (using grep)
for your source code.
[[[NOTE]]]
This is an early development alpha. It basically works, but there are
many known issues yet to be fixed.
The cuttlefish command-line tool runs the app using Bottle's
WSGIRefServer support. In this case the cuttlefish-config.plist is read
directly from the cuttlefish package, which is not nice. This will get
fixed eventually, but in the meantime it's convenient for my debugging.
There is a cuttlefish.wsgi file also embedded in the cuttlefish package.
This can be copied to wherever Apache w/ mod_wsgi would like to see it.
In this case, the cuttlefish-config.plist should either be copied
alongside cuttlefish.wsgi or the latter should be edited to point to
wherever your customized cuttlefish-config.plist resides.
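
For example (the paths below are illustrative, not prescribed by the
project), an Apache virtual host could wire the app up with a single
mod_wsgi directive:

    WSGIScriptAlias /cuttlefish /var/www/cuttlefish/cuttlefish.wsgi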
-- Kaelin 10/26/2009
| PypiClean |
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/pmtypes/base.py | import inspect
import types
import operator
import json
from JumpScale import j
from JumpScale.core.baseclasses.dirtyflaggingmixin import \
DIRTY_PROPERTIES_ATTRIBUTE, DIRTY_AFTER_LAST_SAVE_ATTRIBUTE
#Needed to handle default values
#We want NO_DEFAULT and NO_VALUE to be special sentinel instances because we
#can't use 'None': if we used None as the discriminator for whether a default
#value was provided by a class author/spec writer, it would become impossible
#to use None itself as a default value.
#NO_VALUE is used to check whether a certain attribute is already set. We
#can't use None as a discriminator here either, for the same reason.
NO_DEFAULT = object()
NO_VALUE = object()
#Used as 'ignore' value, which disables call of the internal set_impl in the
#descriptor __set__ when returned from a given fset generator
IGNORE = object()
class BaseType(object):
'''Base class for all defined types'''
def __init__(self, default=NO_DEFAULT, check=None, doc=None,
allow_none=False, flag_dirty=False, fset=None, readonly=False):
'''Initialize a new typed property
@param default: Default value
@type default: object or callable
@param check: Custom checker callable (func(self, value) -> bool)
@type check: callable
@param doc: Docstring for this descriptor
@type doc: string
@param allow_none: Allow setting of 'None' as value
@type allow_none: bool
@param flag_dirty: Enable dirty flagging for this property
@type flag_dirty: bool
@param readonly: Read-only attribute
@type readonly: bool
@note: The fset parameter should not be used unless you know how it
works. See the testcase.
'''
#Need this since we want to store all constructor arguments on the
#object
varnames, _, _, locals_ = inspect.getargvalues(inspect.currentframe())
#We don't use None as default since None could be the desired default
self._default = default
self._check = check
self._allow_none = allow_none
self._flag_dirty = flag_dirty
self._fset = fset
self._readonly = readonly
if doc:
self.__doc__ = doc
#Store constructor arguments
self._constructor_args = dict((name, locals_[name]) \
for name in varnames)
#__get__, __set__ and __delete__ -> descriptor protocol
def __get__(self, obj, objtype=None):
try:
return getattr(obj, self.attribute_name)
except AttributeError:
pass
if self._default is not NO_DEFAULT:
value = self.get_default(obj)
setattr(obj, self.attribute_name, value)
return value
raise AttributeError('\'%s\' has no attribute \'%s\'' % \
(obj.__class__.__name__, self._name))
def __set__(self, obj, new_value):
if self._readonly:
raise AttributeError('can\'t set attribute: readonly attribute')
if self._fset:
helper = self._fset(obj, new_value)
if not isinstance(helper, types.GeneratorType):
raise RuntimeError(
'fset parameter of BaseType should return a generator')
real_new_value = next(helper)
if real_new_value is not IGNORE:
saved_value = self._set_impl(obj, real_new_value)
else:
saved_value = IGNORE
try:
helper.send(saved_value)
next(helper)
except StopIteration:
pass
else:
raise RuntimeError(
'Provided fset generator generates more than one value')
else:
self._set_impl(obj, new_value)
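# Illustrative fset generator (a sketch of the protocol implemented in
# __set__ above, not taken from the original testcase): first yield the
# value that should really be stored, then receive the stored value back
# for post-processing. 'String' stands for a hypothetical BaseType
# subclass.
#
#   def _fset_upper(self, new_value):
#       saved_value = yield new_value.upper()  # pre-process, then store
#       print 'stored %r' % saved_value        # post-process
#
#   name = String(fset=_fset_upper)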
def __delete__(self, obj):
#TODO Check whether we want dirty flagging here
#TODO Check with Peter/Kurt -> Should 'logical delete' go in here?
delattr(obj, self.attribute_name)
constructor_args = property(fget=lambda s: s._constructor_args)
def _set_name(self, value):
self._name = value
self.attribute_name = '_pm_%s' % value
_PM_NAME = property(fget=operator.attrgetter('_name'), fset=_set_name)
def _check_value(self, obj, value):
'''Check the provided value for this type
@param value: Value to check
@type value: object
@returns: Whether the provided value is valid
@rtype: bool
'''
#If None is allowed, let it pass
#Other checks could fail otherwise
if value is None and self._allow_none:
return True
#Type-specific check
if not self.check(value):
return False
#Object-class-specific check
if callable(self._check) and not self._check(obj, value):
return False
return True
def get_default(self, obj):
'''Get default value for descriptor attribute
@returns: Default value
@rtype: object
@raises RuntimeError: If the (generated or constant) default value is not valid for this type
'''
if self._default is not NO_DEFAULT:
#Check whether default is a callable
#If it is, execute it providing obj as argument
#This way default could be something like this:
# class CallableDefault(BaseType):
# def _get_i_square(self):
# return self.i ** 2
#
# i = Integer(default=2)
# s = Integer(default=_get_i_square)
if callable(self._default):
value = self._default(obj)
else:
value = self._default
if not self._check_value(obj, value):
msg = ('Default value %r of property %s on %s is invalid' % \
(value, self._PM_NAME, obj.__class__.__name__))
j.logger.log(msg, 5)
raise RuntimeError(msg)
return value
return None
def _set_impl(self, obj, value):
#Check whether the value is valid
if not self._check_value(obj, value):
#Generic error string
err = '%s property of %s should be a valid %s, %r is not' % \
(self._PM_NAME, obj.__class__.__name__,
self.__class__.__name__, value)
raise ValueError(err)
#Flag the object as dirty, if necessary
if self._flag_dirty:
#Check whether new value is not equal to the old one
current_value = getattr(obj, self.attribute_name, NO_VALUE)
if current_value is not value:
dirty_attributes = getattr(obj, DIRTY_PROPERTIES_ATTRIBUTE,
set())
dirty_attributes.add(self._PM_NAME)
setattr(obj, DIRTY_PROPERTIES_ATTRIBUTE, dirty_attributes)
setattr(obj, DIRTY_AFTER_LAST_SAVE_ATTRIBUTE, True)
#Finally, set the internal attribute to value
setattr(obj, self.attribute_name, value)
return value
@classmethod
def checkString(cls, s):
try:
s = cls.fromString(s)
except ValueError:
return False
return cls.check(s)
@classmethod
def fromString(cls, s):
return json.loads(s)
@classmethod
def toString(cls, s):
return json.dumps(s) | PypiClean |
/Camelot-13.04.13-gpl-pyqt.tar.gz/Camelot-13.04.13-gpl-pyqt/camelot/core/orm/fields.py | from sqlalchemy import schema, orm
from . properties import Property
from . statements import ClassMutator
"""
This module provides support for defining the fields (columns) of your
entities. This module's sole reason for existence is to keep existing Elixir
model definitions working. Do not use it when writing new code; instead,
use Declarative directly.
Two syntaxes are supported, the default attribute-based syntax as well as
the `has_field` DSL statement.
Attribute-based syntax
----------------------
Here is a quick example of how to use the object-oriented syntax.
.. sourcecode:: python
class Person(Entity):
id = Field(Integer, primary_key=True)
name = Field(String(50), required=True)
ssn = Field(String(50), unique=True)
biography = Field(Text)
join_date = Field(DateTime, default=datetime.datetime.now)
photo = Field(Binary, deferred=True)
_email = Field(String(20), colname='email', synonym='email')
def _set_email(self, email):
self._email = email
def _get_email(self):
return self._email
email = property(_get_email, _set_email)
The Field class takes one mandatory argument, which is its type. Please refer
to SQLAlchemy documentation for a list of `types supported by SQLAlchemy
<http://docs.sqlalchemy.org/en/rel_0_7/core/types.html>`_.
Following that first mandatory argument, fields can take any number of
optional keyword arguments. Please note that all the **arguments** that are
**not specifically processed by the Camelot orm module**, as mentioned in the
documentation below, **are passed on to the
:class:`sqlalchemy:sqlalchemy.schema.Column` object**.
The following non-SQLAlchemy-specific arguments are supported:
+-------------------+---------------------------------------------------------+
| Argument Name     | Description                                             |
+===================+=========================================================+
| ``required``      | Specify whether or not this field can be set to None    |
|                   | (left without a value). Defaults to ``False``, unless   |
|                   | the field is a primary key.                             |
+-------------------+---------------------------------------------------------+
| ``colname``       | Specify a custom name for the column of this field. By  |
|                   | default the column will have the same name as the       |
|                   | attribute.                                              |
+-------------------+---------------------------------------------------------+
| ``deferred``      | Specify whether this particular column should be        |
|                   | fetched by default (along with the other columns) when  |
|                   | an instance of the entity is fetched from the database  |
|                   | or rather only later on when this particular column is  |
|                   | first referenced. This can be useful when one wants to  |
|                   | avoid loading a large text or binary field into memory  |
|                   | when it's not needed. Individual columns can be lazy    |
|                   | loaded by themselves (by using ``deferred=True``)       |
|                   | or placed into groups that lazy-load together (by using |
|                   | ``deferred`` = `"group_name"`).                         |
+-------------------+---------------------------------------------------------+
| ``synonym``       | Specify a synonym name for this field. The field will   |
|                   | also be usable under that name in keyword-based Query   |
|                   | functions such as filter_by. The Synonym class (see the |
|                   | `properties` module) provides a similar functionality   |
|                   | with an (arguably) nicer syntax, but a limited scope.   |
+-------------------+---------------------------------------------------------+
has_field
---------
The `has_field` statement allows you to define fields one at a time.
The first argument is the name of the field, the second is its type. Following
these, any number of keyword arguments can be specified for additional
behavior.
Here is a quick example of how to use ``has_field``.
.. sourcecode:: python
class Person(Entity):
has_field('id', Integer, primary_key=True)
has_field('name', String(50))
"""
class Field(Property):
'''
Represents the definition of a 'field' on an entity.
This class represents a column on the table where the entity is stored.
'''
def __init__(self, type, *args, **kwargs):
super(Field, self).__init__()
self.colname = kwargs.pop('colname', None)
self.synonym = kwargs.pop('synonym', None)
self.deferred = kwargs.pop('deferred', False)
if 'required' in kwargs:
kwargs['nullable'] = not kwargs.pop('required')
self.type = type
self.primary_key = kwargs.get('primary_key', False)
self.property = None
self.column = None
self.column_created = False
self.args = args
self.kwargs = kwargs
def attach(self, entity, name):
# If no colname was defined (through the 'colname' kwarg), set
# it to the name of the attr.
if self.colname is None:
self.colname = name
super(Field, self).attach(entity, name)
def create_pk_cols(self):
if self.primary_key:
self.create_col()
def create_non_pk_cols(self):
if not self.primary_key:
self.create_col()
def create_col( self ):
if self.column_created:
return
self.column = schema.Column( self.colname, self.type, *self.args, **self.kwargs )
self.column_created = True
if self.deferred:
group = None
if isinstance(self.deferred, basestring):
group = self.deferred
self.column = orm.deferred( self.column, group = group )
self.entity._descriptor.add_column( self.kwargs.get( 'key', self.name ), self.column )
def create_properties(self):
if self.property is not None:
self.entity._descriptor.add_property( self.name, self.property )
if self.synonym:
self.entity._descriptor.add_property( self.synonym, orm.synonym( self.name ) )
class has_field( ClassMutator ):
def process( self, entity_dict, name, *args, **kwargs ):
entity_dict[ name ] = Field( *args, **kwargs ) | PypiClean |
/D-Analyst-1.0.6.tar.gz/D-Analyst-1.0.6/main/analyst/visuals/mesh_visual.py | import numpy as np
from .visual import Visual
from analyst import get_color
__all__ = ['normalize',
'projection_matrix', 'rotation_matrix', 'scale_matrix',
'translation_matrix', 'camera_matrix',
'MeshVisual']
def normalize(x):
"""Normalize a vector or a set of vectors.
Arguments:
* x: a 1D array (vector) or a 2D array, where each row is a vector.
Returns:
* y: normalized copies of the original vector(s).
"""
if x.ndim == 1:
return x / np.sqrt(np.sum(x ** 2))
elif x.ndim == 2:
return x / np.sqrt(np.sum(x ** 2, axis=1)).reshape((-1, 1))
def projection_matrix(angle, ratio, znear, zfar):
"""Return a 4D projection matrix.
Arguments:
* angle: angle of the field of view, in radians.
* ratio: W/H ratio of the field of view.
* znear, zfar: near and far projection planes.
Returns:
* P: the 4x4 projection matrix.
"""
return np.array([[1. / np.tan(angle), 0, 0, 0],
[0, ratio / np.tan(angle), 0, 0],
[0, 0, (zfar + znear) / (zfar - znear), 1],
[0, 0, -2. * zfar * znear / (zfar - znear), 0]])
def rotation_matrix(angle, axis=0):
"""Return a rotation matrix.
Arguments:
* angle: the angle of the rotation, in radians.
* axis=0: the axis around which the rotation is made. It can be
0 (rotation around x), 1 (rotation around y), 2 (rotation around z).
Returns:
* R: the 4x4 rotation matrix.
"""
mat = np.eye(4)
ind = np.array(sorted(list(set(range(3)).difference([axis]))))
mat[np.ix_(ind,ind)] = np.array([[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]])
return mat
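# Illustrative sketch (not from the original module): composing a rotation
# around the x axis with one around the z axis into a single 4x4 matrix.
#
#   R = np.dot(rotation_matrix(np.pi / 4, axis=0),
#              rotation_matrix(np.pi / 6, axis=2))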
def scale_matrix(x, y, z):
"""Return a scaling matrix.
Arguments:
* x, y, z: the scaling coefficients in each direction.
Returns:
* S: the 4x4 scaling matrix.
"""
return np.diag([x, y, z, 1.])
def translation_matrix(x, y, z):
"""Return a translation matrix.
Arguments:
* x, y, z: the translation coefficients in each direction.
Returns:
* S: the 4x4 translation matrix.
"""
return np.array([
[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[-x, -y, -z, 1]
])
def camera_matrix(eye, target=None, up=None):
"""Return a camera matrix.
Arguments:
* eye: the position of the camera as a 3-vector.
* target: the position of the view target of the camera, as a 3-vector.
* up: a normalized vector pointing in the up direction, as a 3-vector.
Returns:
* S: the 4x4 camera matrix.
"""
if target is None:
target = np.zeros(3)
if up is None:
up = np.array([0., 1., 0.])
zaxis = normalize(target - eye)
xaxis = normalize(np.cross(up, zaxis))
yaxis = normalize(np.cross(zaxis, xaxis))
orientation = np.array([
[xaxis[0], yaxis[0], zaxis[0], 0],
[xaxis[1], yaxis[1], zaxis[1], 0],
[xaxis[2], yaxis[2], zaxis[2], 0],
[ 0, 0, 0, 1]
])
translation = translation_matrix(*eye)
return np.dot(translation, orientation)
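# Illustrative composition (an assumption about intended usage, mirroring
# MeshVisual.initialize below): a model transform plus a look-at camera.
#
#   transform = np.dot(scale_matrix(2., 2., 2.),
#                      rotation_matrix(np.pi / 6, axis=1))
#   camera = camera_matrix(np.array([0., 0., -4.]),
#                          np.zeros(3),
#                          np.array([0., 1., 0.]))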
class MeshVisual(Visual):
"""Template for basic 3D rendering.
This template renders 3D vertices with 3D perspective and basic
diffuse and ambient lighting.
"""
def initialize_navigation(self, is_static=False):
"""Add static or dynamic position transformation."""
if not is_static:
self.add_uniform("transform", vartype="float", ndim=(4,4),
size=None, data=np.eye(4))
self.add_vertex_main("""
gl_Position = projection * camera * transform * gl_Position;""",
position='last')
else:
self.add_vertex_main("""
gl_Position = projection * camera * gl_Position;""")
def initialize_default(self, is_static=False, constrain_ratio=False, **kwargs):
"""Default initialization with navigation-related code."""
self.is_static = is_static
self.constrain_ratio = constrain_ratio
self.initialize_navigation(is_static)
def initialize(self, camera_angle=None, camera_ratio=None, autocolor=None,
camera_zrange=None, position=None, color=None, normal=None, index=None,
vertex_shader=None):
"""Initialize the template.
Arguments:
* camera_angle: the view angle of the camera, in radians.
* camera_ratio: the W/H ratio of the camera.
* camera_zrange: a pair with the far and near z values for the camera
projection.
Template fields are:
* `position`: an attribute with the positions as 3D vertices,
* `normal`: an attribute with the normals as 3D vectors,
* `color`: an attribute with the color of each vertex, as 4D vertices.
* `projection`: an uniform with the 4x4 projection matrix, returned by
`projection_matrix()`.
* `camera`: an uniform with the 4x4 camera matrix, returned by
`camera_matrix()`.
* `transform`: an uniform with the 4x4 transform matrix, returned by
a dot product of `scale_matrix()`, `rotation_matrix()` and
`translation_matrix()`.
* `light_direction`: the direction of the diffuse light as a
3-vector.
* `ambient_light`: the amount of ambient light (white color).
"""
if autocolor is not None:
color = get_color(autocolor)
if camera_angle is None:
camera_angle = np.pi / 4
if camera_ratio is None:
camera_ratio = 4./3.
if camera_zrange is None:
camera_zrange = (.5, 10.)
self.size = position.shape[0]
if self.primitive_type is None:
self.primitive_type = 'TRIANGLES'
self.add_attribute("position", vartype="float", ndim=3, data=position)
self.add_attribute("normal", vartype="float", ndim=3, data=normal)
self.add_attribute("color", vartype="float", ndim=4, data=color)
if index is not None:
self.add_index("index", data=index)
self.add_varying("varying_color", vartype="float", ndim=4)
projection = projection_matrix(camera_angle, camera_ratio, *camera_zrange)
camera = camera_matrix(np.array([0., 0., -4.]),
np.zeros(3),
np.array([0., 1., 0.]))
transform = np.eye(4)
self.add_uniform("projection", ndim=(4, 4), size=None, data=projection)
self.add_uniform("camera", ndim=(4, 4), size=None, data=camera)
self.add_uniform("transform", ndim=(4, 4), size=None, data=transform)
light_direction = normalize(np.array([0., 0., -1.]))
ambient_light = .25
self.add_uniform("light_direction", size=None, ndim=3, data=light_direction)
self.add_uniform("ambient_light", size=None, ndim=1, data=ambient_light)
if not vertex_shader:
vertex_shader = """
// convert the position from 3D to 4D.
gl_Position = vec4(position, 1.0);
// compute the amount of light
float light = dot(light_direction, normalize(mat3(camera) * mat3(transform) * normal));
light = clamp(light, 0, 1);
// add the ambient term
light = clamp(ambient_light + light, 0, 1);
// compute the final color
varying_color = color * light;
// keep the transparency
varying_color.w = color.w;
"""
self.add_vertex_main(vertex_shader)
self.add_fragment_main("""
out_color = varying_color;
""") | PypiClean |
/ImSwitch-2.0.0.tar.gz/ImSwitch-2.0.0/imswitch/imcontrol/controller/controllers/AlignAverageController.py | import numpy as np
from ..basecontrollers import LiveUpdatedController
class AlignAverageController(LiveUpdatedController):
""" Linked to AlignAverageWidget."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.roiAdded = False
# Connect CommunicationChannel signals
self._commChannel.sigUpdateImage.connect(self.update)
# Connect AlignAverageWidget signals
self._widget.sigShowROIToggled.connect(self.toggleROI)
def update(self, detectorName, im, init, isCurrentDetector):
""" Update with new detector frame. """
if isCurrentDetector and self.active:
value = np.mean(
self.getCroppedImage(im, self._widget.getROIGraphicsItem())
)
self._widget.updateGraph(value)
def addROI(self):
""" Adds the ROI to ImageWidget viewbox through the CommunicationChannel. """
if not self.roiAdded:
self._commChannel.sigAddItemToVb.emit(self._widget.getROIGraphicsItem())
self.roiAdded = True
def toggleROI(self, show):
""" Show or hide ROI."""
if show:
self.addROI()
ROIsize = (64, 64)
ROIcenter = self._commChannel.getCenterViewbox()
ROIpos = (ROIcenter[0] - 0.5 * ROIsize[0],
ROIcenter[1] - 0.5 * ROIsize[1])
self._widget.showROI(ROIpos, ROIsize)
else:
self._widget.hideROI()
self.active = show
def getCroppedImage(self, image, roiItem):
""" Returns the cropped image within the ROI. """
x0, y0, x1, y1 = roiItem.bounds
return image[x0:x1, y0:y1]
# Copyright (C) 2020-2021 ImSwitch developers
# This file is part of ImSwitch.
#
# ImSwitch is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ImSwitch is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. | PypiClean |
/CleanAdminDjango-1.5.3.1.tar.gz/CleanAdminDjango-1.5.3.1/django/contrib/admin/util.py | from __future__ import unicode_literals
import datetime
import decimal
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.related import RelatedObject
from django.forms.forms import pretty_name
from django.utils import formats
from django.utils.html import format_html
from django.utils.text import capfirst
from django.utils import timezone
from django.utils.encoding import force_str, force_text, smart_text
from django.utils import six
from django.utils.translation import ungettext
from django.core.urlresolvers import reverse
def lookup_needs_distinct(opts, lookup_path):
"""
Returns True if 'distinct()' should be used to query the given lookup path.
"""
field_name = lookup_path.split('__', 1)[0]
field = opts.get_field_by_name(field_name)[0]
if ((hasattr(field, 'rel') and
isinstance(field.rel, models.ManyToManyRel)) or
(isinstance(field, models.related.RelatedObject) and
not field.field.unique)):
return True
return False
def prepare_lookup_value(key, value):
"""
Returns a lookup value prepared to be used in queryset filtering.
"""
# if key ends with __in, split parameter into separate values
if key.endswith('__in'):
value = value.split(',')
# if key ends with __isnull, special case '' and false
if key.endswith('__isnull'):
if value.lower() in ('', 'false'):
value = False
else:
value = True
return value
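# For example, prepare_lookup_value('id__in', '1,2,3') == ['1', '2', '3'],
# while prepare_lookup_value('parent__isnull', 'false') is False.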
def quote(s):
"""
Ensure that primary key values do not confuse the admin URLs by escaping
any '/', '_' and ':' and similarly problematic characters.
Similar to urllib.quote, except that the quoting is slightly different so
that it doesn't get automatically unquoted by the Web browser.
"""
if not isinstance(s, six.string_types):
return s
res = list(s)
for i in range(len(res)):
c = res[i]
if c in """:/_#?;@&=+$,"<>%\\""":
res[i] = '_%02X' % ord(c)
return ''.join(res)
def unquote(s):
"""
Undo the effects of quote(). Based heavily on urllib.unquote().
"""
mychr = chr
myatoi = int
list = s.split('_')
res = [list[0]]
myappend = res.append
del list[0]
for item in list:
if item[1:2]:
try:
myappend(mychr(myatoi(item[:2], 16)) + item[2:])
except ValueError:
myappend('_' + item)
else:
myappend('_' + item)
return "".join(res)
def flatten_fieldsets(fieldsets):
"""Returns a list of field names from an admin fieldsets structure."""
field_names = []
for name, opts in fieldsets:
for field in opts['fields']:
# type checking feels dirty, but it seems like the best way here
if type(field) == tuple:
field_names.extend(field)
else:
field_names.append(field)
return field_names
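# For example, flatten_fieldsets([(None, {'fields': ('name', ('city', 'zip'))})])
# returns ['name', 'city', 'zip'].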
def get_deleted_objects(objs, opts, user, admin_site, using):
"""
Find all objects related to ``objs`` that should also be deleted. ``objs``
must be a homogeneous iterable of objects (e.g. a QuerySet).
Returns a nested list of strings suitable for display in the
template with the ``unordered_list`` filter.
"""
collector = NestedObjects(using=using)
collector.collect(objs)
perms_needed = set()
def format_callback(obj):
has_admin = obj.__class__ in admin_site._registry
opts = obj._meta
if has_admin:
admin_url = admin_site._registry[
obj.__class__].get_url('change', obj, site=admin_site)
p = '%s.%s' % (opts.app_label,
opts.get_delete_permission())
if not user.has_perm(p):
perms_needed.add(opts.verbose_name)
# Display a link to the admin page.
return format_html('{0}: <a href="{1}">{2}</a>',
capfirst(opts.verbose_name),
admin_url,
obj)
else:
# Don't display link to edit, because it either has no
# admin or is edited inline.
return '%s: %s' % (capfirst(opts.verbose_name),
force_text(obj))
to_delete = collector.nested(format_callback)
protected = [format_callback(obj) for obj in collector.protected]
return to_delete, perms_needed, protected
class NestedObjects(Collector):
def __init__(self, *args, **kwargs):
super(NestedObjects, self).__init__(*args, **kwargs)
self.edges = {} # {from_instance: [to_instances]}
self.protected = set()
def add_edge(self, source, target):
self.edges.setdefault(source, []).append(target)
def collect(self, objs, source_attr=None, **kwargs):
for obj in objs:
if source_attr:
self.add_edge(getattr(obj, source_attr), obj)
else:
self.add_edge(None, obj)
try:
return super(NestedObjects, self).collect(objs, source_attr=source_attr, **kwargs)
except models.ProtectedError as e:
self.protected.update(e.protected_objects)
def related_objects(self, related, objs):
qs = super(NestedObjects, self).related_objects(related, objs)
return qs.select_related(related.field.name)
def _nested(self, obj, seen, format_callback):
if obj in seen:
return []
seen.add(obj)
children = []
for child in self.edges.get(obj, ()):
children.extend(self._nested(child, seen, format_callback))
if format_callback:
ret = [format_callback(obj)]
else:
ret = [obj]
if children:
ret.append(children)
return ret
def nested(self, format_callback=None):
"""
Return the graph as a nested list.
"""
seen = set()
roots = []
for root in self.edges.get(None, ()):
roots.extend(self._nested(root, seen, format_callback))
return roots
def can_fast_delete(self, *args, **kwargs):
"""
We always want to load the objects into memory so that we can display
them to the user in confirm page.
"""
return False
def model_format_dict(obj):
"""
Return a `dict` with keys 'verbose_name' and 'verbose_name_plural',
typically for use with string formatting.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
"""
if isinstance(obj, (models.Model, models.base.ModelBase)):
opts = obj._meta
elif isinstance(obj, models.query.QuerySet):
opts = obj.model._meta
else:
opts = obj
return {
'verbose_name': force_text(opts.verbose_name),
'verbose_name_plural': force_text(opts.verbose_name_plural)
}
def model_ngettext(obj, n=None):
"""
Return the appropriate `verbose_name` or `verbose_name_plural` value for
`obj` depending on the count `n`.
`obj` may be a `Model` instance, `Model` subclass, or `QuerySet` instance.
If `obj` is a `QuerySet` instance, `n` is optional and the length of the
`QuerySet` is used.
"""
if isinstance(obj, models.query.QuerySet):
if n is None:
n = obj.count()
obj = obj.model
d = model_format_dict(obj)
singular, plural = d["verbose_name"], d["verbose_name_plural"]
return ungettext(singular, plural, n or 0)
def lookup_field(name, obj, model_admin=None):
opts = obj._meta
try:
f = opts.get_field(name)
except models.FieldDoesNotExist:
# For non-field values, the value is either a method, property or
# returned via a callable.
if callable(name):
attr = name
value = attr(obj)
elif (model_admin is not None and hasattr(model_admin, name) and
not name == '__str__' and not name == '__unicode__'):
attr = getattr(model_admin, name)
value = attr(obj)
else:
attr = getattr(obj, name)
if callable(attr):
value = attr()
else:
value = attr
f = None
else:
attr = None
value = getattr(obj, name)
return f, attr, value
def label_for_field(name, model, model_admin=None, return_attr=False):
"""
Returns a sensible label for a field name. The name can be a callable or
the name of an object attribute, as well as a genuine field. If return_attr is
True, the resolved attribute (which could be a callable) is also returned.
This will be None if (and only if) the name refers to a field.
"""
attr = None
try:
field = model._meta.get_field_by_name(name)[0]
if isinstance(field, RelatedObject):
label = field.opts.verbose_name
else:
label = field.verbose_name
except models.FieldDoesNotExist:
if name == "__unicode__":
label = force_text(model._meta.verbose_name)
attr = six.text_type
elif name == "__str__":
label = force_str(model._meta.verbose_name)
attr = bytes
else:
if callable(name):
attr = name
elif model_admin is not None and hasattr(model_admin, name):
attr = getattr(model_admin, name)
elif hasattr(model, name):
attr = getattr(model, name)
else:
message = "Unable to lookup '%s' on %s" % (name, model._meta.object_name)
if model_admin:
message += " or %s" % (model_admin.__class__.__name__,)
raise AttributeError(message)
if hasattr(attr, "short_description"):
label = attr.short_description
elif callable(attr):
if attr.__name__ == "<lambda>":
label = "--"
else:
label = pretty_name(attr.__name__)
else:
label = pretty_name(name)
if return_attr:
return (label, attr)
else:
return label
def help_text_for_field(name, model):
try:
help_text = model._meta.get_field_by_name(name)[0].help_text
except models.FieldDoesNotExist:
help_text = ""
return smart_text(help_text)
def display_for_field(value, field):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if field.flatchoices:
return dict(field.flatchoices).get(value, EMPTY_CHANGELIST_VALUE)
# NullBooleanField needs special-case null-handling, so it comes
# before the general null test.
elif isinstance(field, models.BooleanField) or isinstance(field, models.NullBooleanField):
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(field, models.DateTimeField):
return formats.localize(timezone.template_localtime(value))
elif isinstance(field, (models.DateField, models.TimeField)):
return formats.localize(value)
elif isinstance(field, models.DecimalField):
return formats.number_format(value, field.decimal_places)
elif isinstance(field, models.FloatField):
return formats.number_format(value)
else:
return smart_text(value)
def display_for_value(value, boolean=False):
from django.contrib.admin.templatetags.admin_list import _boolean_icon
from django.contrib.admin.views.main import EMPTY_CHANGELIST_VALUE
if boolean:
return _boolean_icon(value)
elif value is None:
return EMPTY_CHANGELIST_VALUE
elif isinstance(value, datetime.datetime):
return formats.localize(timezone.template_localtime(value))
elif isinstance(value, (datetime.date, datetime.time)):
return formats.localize(value)
elif isinstance(value, six.integer_types + (decimal.Decimal, float)):
return formats.number_format(value)
else:
return smart_text(value)
class NotRelationField(Exception):
pass
def get_model_from_relation(field):
if isinstance(field, models.related.RelatedObject):
return field.model
elif getattr(field, 'rel'): # or isinstance?
return field.rel.to
else:
raise NotRelationField
def reverse_field_path(model, path):
""" Create a reversed field path.
E.g. Given (Order, "user__groups"),
return (Group, "user__order").
Final field must be a related model, not a data field.
"""
reversed_path = []
parent = model
pieces = path.split(LOOKUP_SEP)
for piece in pieces:
field, model, direct, m2m = parent._meta.get_field_by_name(piece)
# skip trailing data field if extant:
if len(reversed_path) == len(pieces)-1: # final iteration
try:
get_model_from_relation(field)
except NotRelationField:
break
if direct:
related_name = field.related_query_name()
parent = field.rel.to
else:
related_name = field.field.name
parent = field.model
reversed_path.insert(0, related_name)
return (parent, LOOKUP_SEP.join(reversed_path))
def get_fields_from_path(model, path):
""" Return list of Fields given path relative to model.
e.g. (ModelX, "user__groups__name") -> [
<django.db.models.fields.related.ForeignKey object at 0x...>,
<django.db.models.fields.related.ManyToManyField object at 0x...>,
<django.db.models.fields.CharField object at 0x...>,
]
"""
pieces = path.split(LOOKUP_SEP)
fields = []
for piece in pieces:
if fields:
parent = get_model_from_relation(fields[-1])
else:
parent = model
fields.append(parent._meta.get_field_by_name(piece)[0])
return fields
def remove_trailing_data_field(fields):
""" Discard trailing non-relation field if extant. """
try:
get_model_from_relation(fields[-1])
except NotRelationField:
fields = fields[:-1]
return fields
def get_limit_choices_to_from_path(model, path):
""" Return Q object for limiting choices if applicable.
If final model in path is linked via a ForeignKey or ManyToManyField which
has a `limit_choices_to` attribute, return it as a Q object.
"""
fields = get_fields_from_path(model, path)
fields = remove_trailing_data_field(fields)
limit_choices_to = (
fields and hasattr(fields[-1], 'rel') and
getattr(fields[-1].rel, 'limit_choices_to', None))
if not limit_choices_to:
return models.Q() # empty Q
elif isinstance(limit_choices_to, models.Q):
return limit_choices_to # already a Q
else:
return models.Q(**limit_choices_to) # convert dict to Q | PypiClean |
/C-Telethon-1.28.5.tar.gz/C-Telethon-1.28.5/telethon/tl/alltlobjects.py |
from . import types, functions
LAYER = 158
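# Maps each TL constructor ID to the generated class used to deserialize it,
# e.g. tlobjects[0x7f3b18ea] is types.InputPeerEmpty.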
tlobjects = {
0x7f3b18ea: types.InputPeerEmpty,
0x7da07ec9: types.InputPeerSelf,
0x35a95cb9: types.InputPeerChat,
0xdde8a54c: types.InputPeerUser,
0x27bcbbfc: types.InputPeerChannel,
0xa87b0a1c: types.InputPeerUserFromMessage,
0xbd2a0840: types.InputPeerChannelFromMessage,
0xb98886cf: types.InputUserEmpty,
0xf7c1b13f: types.InputUserSelf,
0xf21158c6: types.InputUser,
0x1da448e2: types.InputUserFromMessage,
0xf392b7f4: types.InputPhoneContact,
0xf52ff27f: types.InputFile,
0xfa4f0bb5: types.InputFileBig,
0x9664f57f: types.InputMediaEmpty,
0x1e287d04: types.InputMediaUploadedPhoto,
0xb3ba0635: types.InputMediaPhoto,
0xf9c44144: types.InputMediaGeoPoint,
0xf8ab7dfb: types.InputMediaContact,
0x5b38c6c1: types.InputMediaUploadedDocument,
0x33473058: types.InputMediaDocument,
0xc13d1c11: types.InputMediaVenue,
0xe5bbfe1a: types.InputMediaPhotoExternal,
0xfb52dc99: types.InputMediaDocumentExternal,
0xd33f43f3: types.InputMediaGame,
0x8eb5a6d5: types.InputMediaInvoice,
0x971fa843: types.InputMediaGeoLive,
0x0f94e5f1: types.InputMediaPoll,
0xe66fbf7b: types.InputMediaDice,
0x1ca48f57: types.InputChatPhotoEmpty,
0xbdcdaec0: types.InputChatUploadedPhoto,
0x8953ad37: types.InputChatPhoto,
0xe4c123d6: types.InputGeoPointEmpty,
0x48222faf: types.InputGeoPoint,
0x1cd7bf0d: types.InputPhotoEmpty,
0x3bb3b94a: types.InputPhoto,
0xdfdaabe1: types.InputFileLocation,
0xf5235d55: types.InputEncryptedFileLocation,
0xbad07584: types.InputDocumentFileLocation,
0xcbc7ee28: types.InputSecureFileLocation,
0x29be5899: types.InputTakeoutFileLocation,
0x40181ffe: types.InputPhotoFileLocation,
0xd83466f3: types.InputPhotoLegacyFileLocation,
0x37257e99: types.InputPeerPhotoFileLocation,
0x9d84f3db: types.InputStickerSetThumb,
0x0598a92a: types.InputGroupCallStream,
0x59511722: types.PeerUser,
0x36c6019a: types.PeerChat,
0xa2a5371e: types.PeerChannel,
0xaa963b05: types.storage.FileUnknown,
0x40bc6f52: types.storage.FilePartial,
0x007efe0e: types.storage.FileJpeg,
0xcae1aadf: types.storage.FileGif,
0x0a4f63c0: types.storage.FilePng,
0xae1e508d: types.storage.FilePdf,
0x528a0677: types.storage.FileMp3,
0x4b09ebbc: types.storage.FileMov,
0xb3cea0e4: types.storage.FileMp4,
0x1081464c: types.storage.FileWebp,
0xd3bc4b7a: types.UserEmpty,
0x8f97c628: types.User,
0x4f11bae1: types.UserProfilePhotoEmpty,
0x82d1f706: types.UserProfilePhoto,
0x09d05049: types.UserStatusEmpty,
0xedb93949: types.UserStatusOnline,
0x008c703f: types.UserStatusOffline,
0xe26f42f1: types.UserStatusRecently,
0x07bf09fc: types.UserStatusLastWeek,
0x77ebc742: types.UserStatusLastMonth,
0x29562865: types.ChatEmpty,
0x41cbf256: types.Chat,
0x6592a1a7: types.ChatForbidden,
0x83259464: types.Channel,
0x17d493d5: types.ChannelForbidden,
0xc9d31138: types.ChatFull,
0xf2355507: types.ChannelFull,
0xc02d4007: types.ChatParticipant,
0xe46bcee4: types.ChatParticipantCreator,
0xa0933f5b: types.ChatParticipantAdmin,
0x8763d3e1: types.ChatParticipantsForbidden,
0x3cbc93f8: types.ChatParticipants,
0x37c1011c: types.ChatPhotoEmpty,
0x1c6e1c11: types.ChatPhoto,
0x90a6ca84: types.MessageEmpty,
0x38116ee0: types.Message,
0x2b085862: types.MessageService,
0x3ded6320: types.MessageMediaEmpty,
0x695150d7: types.MessageMediaPhoto,
0x56e0d474: types.MessageMediaGeo,
0x70322949: types.MessageMediaContact,
0x9f84f49e: types.MessageMediaUnsupported,
0x9cb070d7: types.MessageMediaDocument,
0xa32dd600: types.MessageMediaWebPage,
0x2ec0533f: types.MessageMediaVenue,
0xfdb19008: types.MessageMediaGame,
0xf6a548d3: types.MessageMediaInvoice,
0xb940c666: types.MessageMediaGeoLive,
0x4bd6e798: types.MessageMediaPoll,
0x3f7ee58b: types.MessageMediaDice,
0xb6aef7b0: types.MessageActionEmpty,
0xbd47cbad: types.MessageActionChatCreate,
0xb5a1ce5a: types.MessageActionChatEditTitle,
0x7fcb13a8: types.MessageActionChatEditPhoto,
0x95e3fbef: types.MessageActionChatDeletePhoto,
0x15cefd00: types.MessageActionChatAddUser,
0xa43f30cc: types.MessageActionChatDeleteUser,
0x031224c3: types.MessageActionChatJoinedByLink,
0x95d2ac92: types.MessageActionChannelCreate,
0xe1037f92: types.MessageActionChatMigrateTo,
0xea3948e9: types.MessageActionChannelMigrateFrom,
0x94bd38ed: types.MessageActionPinMessage,
0x9fbab604: types.MessageActionHistoryClear,
0x92a72876: types.MessageActionGameScore,
0x8f31b327: types.MessageActionPaymentSentMe,
0x96163f56: types.MessageActionPaymentSent,
0x80e11a7f: types.MessageActionPhoneCall,
0x4792929b: types.MessageActionScreenshotTaken,
0xfae69f56: types.MessageActionCustomAction,
0xc516d679: types.MessageActionBotAllowed,
0x1b287353: types.MessageActionSecureValuesSentMe,
0xd95c6154: types.MessageActionSecureValuesSent,
0xf3f25f76: types.MessageActionContactSignUp,
0x98e0d697: types.MessageActionGeoProximityReached,
0x7a0d7f42: types.MessageActionGroupCall,
0x502f92f7: types.MessageActionInviteToGroupCall,
0x3c134d7b: types.MessageActionSetMessagesTTL,
0xb3a07661: types.MessageActionGroupCallScheduled,
0xaa786345: types.MessageActionSetChatTheme,
0xebbca3cb: types.MessageActionChatJoinedByRequest,
0x47dd8079: types.MessageActionWebViewDataSentMe,
0xb4c38cb5: types.MessageActionWebViewDataSent,
0xc83d6aec: types.MessageActionGiftPremium,
0x0d999256: types.MessageActionTopicCreate,
0xc0944820: types.MessageActionTopicEdit,
0x57de635e: types.MessageActionSuggestProfilePhoto,
0xfe77345d: types.MessageActionRequestedPeer,
0xbc44a927: types.MessageActionSetChatWallPaper,
0xc0787d6d: types.MessageActionSetSameChatWallPaper,
0xd58a08c6: types.Dialog,
0x71bd134c: types.DialogFolder,
0x2331b22d: types.PhotoEmpty,
0xfb197a65: types.Photo,
0x0e17e23c: types.PhotoSizeEmpty,
0x75c78e60: types.PhotoSize,
0x021e1ad6: types.PhotoCachedSize,
0xe0b0bc2e: types.PhotoStrippedSize,
0xfa3efb95: types.PhotoSizeProgressive,
0xd8214d41: types.PhotoPathSize,
0x1117dd5f: types.GeoPointEmpty,
0xb2a2f663: types.GeoPoint,
0x5e002502: types.auth.SentCode,
0x2390fe44: types.auth.SentCodeSuccess,
0x2ea2c0d4: types.auth.Authorization,
0x44747e9a: types.auth.AuthorizationSignUpRequired,
0xb434e2b8: types.auth.ExportedAuthorization,
0xb8bc5b0c: types.InputNotifyPeer,
0x193b4417: types.InputNotifyUsers,
0x4a95e84e: types.InputNotifyChats,
0xb1db7c7e: types.InputNotifyBroadcasts,
0x5c467992: types.InputNotifyForumTopic,
0xdf1f002b: types.InputPeerNotifySettings,
0xa83b0426: types.PeerNotifySettings,
0xa518110d: types.PeerSettings,
0xa437c3ed: types.WallPaper,
0xe0804116: types.WallPaperNoFile,
0x58dbcab8: types.InputReportReasonSpam,
0x1e22c78d: types.InputReportReasonViolence,
0x2e59d922: types.InputReportReasonPornography,
0xadf44ee3: types.InputReportReasonChildAbuse,
0xc1e4a2b1: types.InputReportReasonOther,
0x9b89f93a: types.InputReportReasonCopyright,
0xdbd4feed: types.InputReportReasonGeoIrrelevant,
0xf5ddd6e7: types.InputReportReasonFake,
0x0a8eb2be: types.InputReportReasonIllegalDrugs,
0x9ec7863d: types.InputReportReasonPersonalDetails,
0x93eadb53: types.UserFull,
0x145ade0b: types.Contact,
0xc13e3c50: types.ImportedContact,
0x16d9703b: types.ContactStatus,
0xb74ba9d2: types.contacts.ContactsNotModified,
0xeae87e42: types.contacts.Contacts,
0x77d01c3b: types.contacts.ImportedContacts,
0x0ade1591: types.contacts.Blocked,
0xe1664194: types.contacts.BlockedSlice,
0x15ba6c40: types.messages.Dialogs,
0x71e094f3: types.messages.DialogsSlice,
0xf0e3e596: types.messages.DialogsNotModified,
0x8c718e87: types.messages.Messages,
0x3a54685e: types.messages.MessagesSlice,
0xc776ba4e: types.messages.ChannelMessages,
0x74535f21: types.messages.MessagesNotModified,
0x64ff9fd5: types.messages.Chats,
0x9cd81144: types.messages.ChatsSlice,
0xe5d7d19c: types.messages.ChatFull,
0xb45c69d1: types.messages.AffectedHistory,
0x57e2f66c: types.InputMessagesFilterEmpty,
0x9609a51c: types.InputMessagesFilterPhotos,
0x9fc00e65: types.InputMessagesFilterVideo,
0x56e9f0e4: types.InputMessagesFilterPhotoVideo,
0x9eddf188: types.InputMessagesFilterDocument,
0x7ef0dd87: types.InputMessagesFilterUrl,
0xffc86587: types.InputMessagesFilterGif,
0x50f5c392: types.InputMessagesFilterVoice,
0x3751b49e: types.InputMessagesFilterMusic,
0x3a20ecb8: types.InputMessagesFilterChatPhotos,
0x80c99768: types.InputMessagesFilterPhoneCalls,
0x7a7c17a4: types.InputMessagesFilterRoundVoice,
0xb549da53: types.InputMessagesFilterRoundVideo,
0xc1f8e69a: types.InputMessagesFilterMyMentions,
0xe7026d0d: types.InputMessagesFilterGeo,
0xe062db83: types.InputMessagesFilterContacts,
0x1bb00451: types.InputMessagesFilterPinned,
0x1f2b0afd: types.UpdateNewMessage,
0x4e90bfd6: types.UpdateMessageID,
0xa20db0e5: types.UpdateDeleteMessages,
0xc01e857f: types.UpdateUserTyping,
0x83487af0: types.UpdateChatUserTyping,
0x07761198: types.UpdateChatParticipants,
0xe5bdf8de: types.UpdateUserStatus,
0xa7848924: types.UpdateUserName,
0x12bcbd9a: types.UpdateNewEncryptedMessage,
0x1710f156: types.UpdateEncryptedChatTyping,
0xb4a2e88d: types.UpdateEncryption,
0x38fe25b7: types.UpdateEncryptedMessagesRead,
0x3dda5451: types.UpdateChatParticipantAdd,
0xe32f3d77: types.UpdateChatParticipantDelete,
0x8e5e9873: types.UpdateDcOptions,
0xbec268ef: types.UpdateNotifySettings,
0xebe46819: types.UpdateServiceNotification,
0xee3b272a: types.UpdatePrivacy,
0x05492a13: types.UpdateUserPhone,
0x9c974fdf: types.UpdateReadHistoryInbox,
0x2f2f21bf: types.UpdateReadHistoryOutbox,
0x7f891213: types.UpdateWebPage,
0x68c13933: types.UpdateReadMessagesContents,
0x108d941f: types.UpdateChannelTooLong,
0x635b4c09: types.UpdateChannel,
0x62ba04d9: types.UpdateNewChannelMessage,
0x922e6e10: types.UpdateReadChannelInbox,
0xc32d5b12: types.UpdateDeleteChannelMessages,
0xf226ac08: types.UpdateChannelMessageViews,
0xd7ca61a2: types.UpdateChatParticipantAdmin,
0x688a30aa: types.UpdateNewStickerSet,
0x0bb2d201: types.UpdateStickerSetsOrder,
0x31c24808: types.UpdateStickerSets,
0x9375341e: types.UpdateSavedGifs,
0x496f379c: types.UpdateBotInlineQuery,
0x12f12a07: types.UpdateBotInlineSend,
0x1b3f4df7: types.UpdateEditChannelMessage,
0xb9cfc48d: types.UpdateBotCallbackQuery,
0xe40370a3: types.UpdateEditMessage,
0x691e9052: types.UpdateInlineBotCallbackQuery,
0xb75f99a9: types.UpdateReadChannelOutbox,
0x1b49ec6d: types.UpdateDraftMessage,
0x571d2742: types.UpdateReadFeaturedStickers,
0x9a422c20: types.UpdateRecentStickers,
0xa229dd06: types.UpdateConfig,
0x3354678f: types.UpdatePtsChanged,
0x2f2ba99f: types.UpdateChannelWebPage,
0x6e6fe51c: types.UpdateDialogPinned,
0xfa0f3ca2: types.UpdatePinnedDialogs,
0x8317c0c3: types.UpdateBotWebhookJSON,
0x9b9240a6: types.UpdateBotWebhookJSONQuery,
0xb5aefd7d: types.UpdateBotShippingQuery,
0x8caa9a96: types.UpdateBotPrecheckoutQuery,
0xab0f6b1e: types.UpdatePhoneCall,
0x46560264: types.UpdateLangPackTooLong,
0x56022f4d: types.UpdateLangPack,
0xe511996d: types.UpdateFavedStickers,
0xea29055d: types.UpdateChannelReadMessagesContents,
0x7084a7be: types.UpdateContactsReset,
0xb23fc698: types.UpdateChannelAvailableMessages,
0xe16459c3: types.UpdateDialogUnreadMark,
0xaca1657b: types.UpdateMessagePoll,
0x54c01850: types.UpdateChatDefaultBannedRights,
0x19360dc0: types.UpdateFolderPeers,
0x6a7e7366: types.UpdatePeerSettings,
0xb4afcfb0: types.UpdatePeerLocated,
0x39a51dfb: types.UpdateNewScheduledMessage,
0x90866cee: types.UpdateDeleteScheduledMessages,
0x8216fba3: types.UpdateTheme,
0x871fb939: types.UpdateGeoLiveViewed,
0x564fe691: types.UpdateLoginToken,
0x106395c9: types.UpdateMessagePollVote,
0x26ffde7d: types.UpdateDialogFilter,
0xa5d72105: types.UpdateDialogFilterOrder,
0x3504914f: types.UpdateDialogFilters,
0x2661bf09: types.UpdatePhoneCallSignalingData,
0xd29a27f4: types.UpdateChannelMessageForwards,
0xd6b19546: types.UpdateReadChannelDiscussionInbox,
0x695c9e7c: types.UpdateReadChannelDiscussionOutbox,
0x246a4b22: types.UpdatePeerBlocked,
0x8c88c923: types.UpdateChannelUserTyping,
0xed85eab5: types.UpdatePinnedMessages,
0x5bb98608: types.UpdatePinnedChannelMessages,
0xf89a6a4e: types.UpdateChat,
0xf2ebdb4e: types.UpdateGroupCallParticipants,
0x14b24500: types.UpdateGroupCall,
0xbb9bb9a5: types.UpdatePeerHistoryTTL,
0xd087663a: types.UpdateChatParticipant,
0x985d3abb: types.UpdateChannelParticipant,
0xc4870a49: types.UpdateBotStopped,
0x0b783982: types.UpdateGroupCallConnection,
0x4d712f2e: types.UpdateBotCommands,
0x7063c3db: types.UpdatePendingJoinRequests,
0x11dfa986: types.UpdateBotChatInviteRequester,
0x5e1b3cb8: types.UpdateMessageReactions,
0x17b7a20b: types.UpdateAttachMenuBots,
0x1592b79d: types.UpdateWebViewResultSent,
0x14b85813: types.UpdateBotMenuButton,
0x74d8be99: types.UpdateSavedRingtones,
0x0084cd5a: types.UpdateTranscribedAudio,
0xfb4c496c: types.UpdateReadFeaturedEmojiStickers,
0x28373599: types.UpdateUserEmojiStatus,
0x30f443db: types.UpdateRecentEmojiStatuses,
0x6f7863f4: types.UpdateRecentReactions,
0x86fccf85: types.UpdateMoveStickerSetToTop,
0x5a73a98c: types.UpdateMessageExtendedMedia,
0x192efbe3: types.UpdateChannelPinnedTopic,
0xfe198602: types.UpdateChannelPinnedTopics,
0x20529438: types.UpdateUser,
0xec05b097: types.UpdateAutoSaveSettings,
0xccf08ad6: types.UpdateGroupInvitePrivacyForbidden,
0xa56c2a3e: types.updates.State,
0x5d75a138: types.updates.DifferenceEmpty,
0x00f49ca0: types.updates.Difference,
0xa8fb1981: types.updates.DifferenceSlice,
0x4afe8f6d: types.updates.DifferenceTooLong,
0xe317af7e: types.UpdatesTooLong,
0x313bc7f8: types.UpdateShortMessage,
0x4d6deea5: types.UpdateShortChatMessage,
0x78d4dec1: types.UpdateShort,
0x725b04c3: types.UpdatesCombined,
0x74ae4240: types.Updates,
0x9015e101: types.UpdateShortSentMessage,
0x8dca6aa5: types.photos.Photos,
0x15051f54: types.photos.PhotosSlice,
0x20212ca8: types.photos.Photo,
0x096a18d5: types.upload.File,
0xf18cda44: types.upload.FileCdnRedirect,
0x18b7a10d: types.DcOption,
0xcc1a241e: types.Config,
0x8e1a1775: types.NearestDc,
0xccbbce30: types.help.AppUpdate,
0xc45a6536: types.help.NoAppUpdate,
0x18cb9f78: types.help.InviteText,
0xab7ec0a0: types.EncryptedChatEmpty,
0x66b25953: types.EncryptedChatWaiting,
0x48f1d94c: types.EncryptedChatRequested,
0x61f0d4c7: types.EncryptedChat,
0x1e1c7c45: types.EncryptedChatDiscarded,
0xf141b5e1: types.InputEncryptedChat,
0xc21f497e: types.EncryptedFileEmpty,
0xa8008cd8: types.EncryptedFile,
0x1837c364: types.InputEncryptedFileEmpty,
0x64bd0306: types.InputEncryptedFileUploaded,
0x5a17b5e5: types.InputEncryptedFile,
0x2dc173c8: types.InputEncryptedFileBigUploaded,
0xed18c118: types.EncryptedMessage,
0x23734b06: types.EncryptedMessageService,
0xc0e24635: types.messages.DhConfigNotModified,
0x2c221edd: types.messages.DhConfig,
0x560f8935: types.messages.SentEncryptedMessage,
0x9493ff32: types.messages.SentEncryptedFile,
0x72f0eaae: types.InputDocumentEmpty,
0x1abfb575: types.InputDocument,
0x36f8c871: types.DocumentEmpty,
0x8fd4c4d8: types.Document,
0x17c6b5f6: types.help.Support,
0x9fd40bd8: types.NotifyPeer,
0xb4c83b4c: types.NotifyUsers,
0xc007cec3: types.NotifyChats,
0xd612e8ef: types.NotifyBroadcasts,
0x226e6308: types.NotifyForumTopic,
0x16bf744e: types.SendMessageTypingAction,
0xfd5ec8f5: types.SendMessageCancelAction,
0xa187d66f: types.SendMessageRecordVideoAction,
0xe9763aec: types.SendMessageUploadVideoAction,
0xd52f73f7: types.SendMessageRecordAudioAction,
0xf351d7ab: types.SendMessageUploadAudioAction,
0xd1d34a26: types.SendMessageUploadPhotoAction,
0xaa0cd9e4: types.SendMessageUploadDocumentAction,
0x176f8ba1: types.SendMessageGeoLocationAction,
0x628cbc6f: types.SendMessageChooseContactAction,
0xdd6a8f48: types.SendMessageGamePlayAction,
0x88f27fbc: types.SendMessageRecordRoundAction,
0x243e1c66: types.SendMessageUploadRoundAction,
0xd92c2285: types.SpeakingInGroupCallAction,
0xdbda9246: types.SendMessageHistoryImportAction,
0xb05ac6b1: types.SendMessageChooseStickerAction,
0x25972bcb: types.SendMessageEmojiInteraction,
0xb665902e: types.SendMessageEmojiInteractionSeen,
0xb3134d9d: types.contacts.Found,
0x4f96cb18: types.InputPrivacyKeyStatusTimestamp,
0xbdfb0426: types.InputPrivacyKeyChatInvite,
0xfabadc5f: types.InputPrivacyKeyPhoneCall,
0xdb9e70d2: types.InputPrivacyKeyPhoneP2P,
0xa4dd4c08: types.InputPrivacyKeyForwards,
0x5719bacc: types.InputPrivacyKeyProfilePhoto,
0x0352dafa: types.InputPrivacyKeyPhoneNumber,
0xd1219bdd: types.InputPrivacyKeyAddedByPhone,
0xaee69d68: types.InputPrivacyKeyVoiceMessages,
0xbc2eab30: types.PrivacyKeyStatusTimestamp,
0x500e6dfa: types.PrivacyKeyChatInvite,
0x3d662b7b: types.PrivacyKeyPhoneCall,
0x39491cc8: types.PrivacyKeyPhoneP2P,
0x69ec56a3: types.PrivacyKeyForwards,
0x96151fed: types.PrivacyKeyProfilePhoto,
0xd19ae46d: types.PrivacyKeyPhoneNumber,
0x42ffd42b: types.PrivacyKeyAddedByPhone,
0x0697f414: types.PrivacyKeyVoiceMessages,
0x0d09e07b: types.InputPrivacyValueAllowContacts,
0x184b35ce: types.InputPrivacyValueAllowAll,
0x131cc67f: types.InputPrivacyValueAllowUsers,
0x0ba52007: types.InputPrivacyValueDisallowContacts,
0xd66b66c9: types.InputPrivacyValueDisallowAll,
0x90110467: types.InputPrivacyValueDisallowUsers,
0x840649cf: types.InputPrivacyValueAllowChatParticipants,
0xe94f0f86: types.InputPrivacyValueDisallowChatParticipants,
0xfffe1bac: types.PrivacyValueAllowContacts,
0x65427b82: types.PrivacyValueAllowAll,
0xb8905fb2: types.PrivacyValueAllowUsers,
0xf888fa1a: types.PrivacyValueDisallowContacts,
0x8b73e763: types.PrivacyValueDisallowAll,
0xe4621141: types.PrivacyValueDisallowUsers,
0x6b134e8e: types.PrivacyValueAllowChatParticipants,
0x41c87565: types.PrivacyValueDisallowChatParticipants,
0x50a04e45: types.account.PrivacyRules,
0xb8d0afdf: types.AccountDaysTTL,
0x6c37c15c: types.DocumentAttributeImageSize,
0x11b58939: types.DocumentAttributeAnimated,
0x6319d612: types.DocumentAttributeSticker,
0x0ef02ce6: types.DocumentAttributeVideo,
0x9852f9c6: types.DocumentAttributeAudio,
0x15590068: types.DocumentAttributeFilename,
0x9801d2f7: types.DocumentAttributeHasStickers,
0xfd149899: types.DocumentAttributeCustomEmoji,
0xf1749a22: types.messages.StickersNotModified,
0x30a6ec7e: types.messages.Stickers,
0x12b299d4: types.StickerPack,
0xe86602c3: types.messages.AllStickersNotModified,
0xcdbbcebb: types.messages.AllStickers,
0x84d19185: types.messages.AffectedMessages,
0xeb1477e8: types.WebPageEmpty,
0xc586da1c: types.WebPagePending,
0xe89c45b2: types.WebPage,
0x7311ca11: types.WebPageNotModified,
0xad01d61d: types.Authorization,
0x4bff8ea0: types.account.Authorizations,
0x957b50fb: types.account.Password,
0x9a5c33e5: types.account.PasswordSettings,
0xc23727c9: types.account.PasswordInputSettings,
0x137948a5: types.auth.PasswordRecovery,
0xa384b779: types.ReceivedNotifyMessage,
0x0ab4a819: types.ChatInviteExported,
0xed107ab7: types.ChatInvitePublicJoinRequests,
0x5a686d7c: types.ChatInviteAlready,
0x300c44c1: types.ChatInvite,
0x61695cb0: types.ChatInvitePeek,
0xffb62b95: types.InputStickerSetEmpty,
0x9de7a269: types.InputStickerSetID,
0x861cc8a0: types.InputStickerSetShortName,
0x028703c8: types.InputStickerSetAnimatedEmoji,
0xe67f520e: types.InputStickerSetDice,
0x0cde3739: types.InputStickerSetAnimatedEmojiAnimations,
0xc88b3b02: types.InputStickerSetPremiumGifts,
0x04c4d4ce: types.InputStickerSetEmojiGenericAnimations,
0x29d0f5ee: types.InputStickerSetEmojiDefaultStatuses,
0x44c1f8e9: types.InputStickerSetEmojiDefaultTopicIcons,
0x2dd14edc: types.StickerSet,
0x6e153f16: types.messages.StickerSet,
0xd3f924eb: types.messages.StickerSetNotModified,
0xc27ac8c7: types.BotCommand,
0x8f300b57: types.BotInfo,
0xa2fa4880: types.KeyboardButton,
0x258aff05: types.KeyboardButtonUrl,
0x35bbdb6b: types.KeyboardButtonCallback,
0xb16a6c29: types.KeyboardButtonRequestPhone,
0xfc796b3f: types.KeyboardButtonRequestGeoLocation,
0x93b9fbb5: types.KeyboardButtonSwitchInline,
0x50f41ccf: types.KeyboardButtonGame,
0xafd93fbb: types.KeyboardButtonBuy,
0x10b78d29: types.KeyboardButtonUrlAuth,
0xd02e7fd4: types.InputKeyboardButtonUrlAuth,
0xbbc7515d: types.KeyboardButtonRequestPoll,
0xe988037b: types.InputKeyboardButtonUserProfile,
0x308660c1: types.KeyboardButtonUserProfile,
0x13767230: types.KeyboardButtonWebView,
0xa0c0505c: types.KeyboardButtonSimpleWebView,
0x0d0b468c: types.KeyboardButtonRequestPeer,
0x77608b83: types.KeyboardButtonRow,
0xa03e5b85: types.ReplyKeyboardHide,
0x86b40b08: types.ReplyKeyboardForceReply,
0x85dd99d1: types.ReplyKeyboardMarkup,
0x48a30254: types.ReplyInlineMarkup,
0xbb92ba95: types.MessageEntityUnknown,
0xfa04579d: types.MessageEntityMention,
0x6f635b0d: types.MessageEntityHashtag,
0x6cef8ac7: types.MessageEntityBotCommand,
0x6ed02538: types.MessageEntityUrl,
0x64e475c2: types.MessageEntityEmail,
0xbd610bc9: types.MessageEntityBold,
0x826f8b60: types.MessageEntityItalic,
0x28a20571: types.MessageEntityCode,
0x73924be0: types.MessageEntityPre,
0x76a6d327: types.MessageEntityTextUrl,
0xdc7b1140: types.MessageEntityMentionName,
0x208e68c9: types.InputMessageEntityMentionName,
0x9b69e34b: types.MessageEntityPhone,
0x4c4e743f: types.MessageEntityCashtag,
0x9c4e7e8b: types.MessageEntityUnderline,
0xbf0693d4: types.MessageEntityStrike,
0x020df5d0: types.MessageEntityBlockquote,
0x761e6af4: types.MessageEntityBankCard,
0x32ca960f: types.MessageEntitySpoiler,
0xc8cf05f8: types.MessageEntityCustomEmoji,
0xee8c1e86: types.InputChannelEmpty,
0xf35aec28: types.InputChannel,
0x5b934f9d: types.InputChannelFromMessage,
0x7f077ad9: types.contacts.ResolvedPeer,
0x0ae30253: types.MessageRange,
0x3e11affb: types.updates.ChannelDifferenceEmpty,
0xa4bcc6fe: types.updates.ChannelDifferenceTooLong,
0x2064674e: types.updates.ChannelDifference,
0x94d42ee7: types.ChannelMessagesFilterEmpty,
0xcd77d957: types.ChannelMessagesFilter,
0xc00c07c0: types.ChannelParticipant,
0x35a8bfa7: types.ChannelParticipantSelf,
0x2fe601d3: types.ChannelParticipantCreator,
0x34c3bb53: types.ChannelParticipantAdmin,
0x6df8014e: types.ChannelParticipantBanned,
0x1b03f006: types.ChannelParticipantLeft,
0xde3f3c79: types.ChannelParticipantsRecent,
0xb4608969: types.ChannelParticipantsAdmins,
0xa3b54985: types.ChannelParticipantsKicked,
0xb0d1865b: types.ChannelParticipantsBots,
0x1427a5e1: types.ChannelParticipantsBanned,
0x0656ac4b: types.ChannelParticipantsSearch,
0xbb6ae88d: types.ChannelParticipantsContacts,
0xe04b5ceb: types.ChannelParticipantsMentions,
0x9ab0feaf: types.channels.ChannelParticipants,
0xf0173fe9: types.channels.ChannelParticipantsNotModified,
0xdfb80317: types.channels.ChannelParticipant,
0x780a0310: types.help.TermsOfService,
0xe8025ca2: types.messages.SavedGifsNotModified,
0x84a02a0d: types.messages.SavedGifs,
0x3380c786: types.InputBotInlineMessageMediaAuto,
0x3dcd7a87: types.InputBotInlineMessageText,
0x96929a85: types.InputBotInlineMessageMediaGeo,
0x417bbf11: types.InputBotInlineMessageMediaVenue,
0xa6edbffd: types.InputBotInlineMessageMediaContact,
0x4b425864: types.InputBotInlineMessageGame,
0xd7e78225: types.InputBotInlineMessageMediaInvoice,
0x88bf9319: types.InputBotInlineResult,
0xa8d864a7: types.InputBotInlineResultPhoto,
0xfff8fdc4: types.InputBotInlineResultDocument,
0x4fa417f2: types.InputBotInlineResultGame,
0x764cf810: types.BotInlineMessageMediaAuto,
0x8c7f65e2: types.BotInlineMessageText,
0x051846fd: types.BotInlineMessageMediaGeo,
0x8a86659c: types.BotInlineMessageMediaVenue,
0x18d1cdc2: types.BotInlineMessageMediaContact,
0x354a9b09: types.BotInlineMessageMediaInvoice,
0x11965f3a: types.BotInlineResult,
0x17db940b: types.BotInlineMediaResult,
0xe021f2f6: types.messages.BotResults,
0x5dab1af4: types.ExportedMessageLink,
0x5f777dce: types.MessageFwdHeader,
0x72a3158c: types.auth.CodeTypeSms,
0x741cd3e3: types.auth.CodeTypeCall,
0x226ccefb: types.auth.CodeTypeFlashCall,
0xd61ad6ee: types.auth.CodeTypeMissedCall,
0x06ed998c: types.auth.CodeTypeFragmentSms,
0x3dbb5986: types.auth.SentCodeTypeApp,
0xc000bba2: types.auth.SentCodeTypeSms,
0x5353e5a7: types.auth.SentCodeTypeCall,
0xab03c6d9: types.auth.SentCodeTypeFlashCall,
0x82006484: types.auth.SentCodeTypeMissedCall,
0xf450f59b: types.auth.SentCodeTypeEmailCode,
0xa5491dea: types.auth.SentCodeTypeSetUpEmailRequired,
0xd9565c39: types.auth.SentCodeTypeFragmentSms,
0xe57b1432: types.auth.SentCodeTypeFirebaseSms,
0x36585ea4: types.messages.BotCallbackAnswer,
0x26b5dde6: types.messages.MessageEditData,
0x890c3d89: types.InputBotInlineMessageID,
0xb6d915d7: types.InputBotInlineMessageID64,
0x3c20629f: types.InlineBotSwitchPM,
0x3371c354: types.messages.PeerDialogs,
0xedcdc05b: types.TopPeer,
0xab661b5b: types.TopPeerCategoryBotsPM,
0x148677e2: types.TopPeerCategoryBotsInline,
0x0637b7ed: types.TopPeerCategoryCorrespondents,
0xbd17a14a: types.TopPeerCategoryGroups,
0x161d9628: types.TopPeerCategoryChannels,
0x1e76a78c: types.TopPeerCategoryPhoneCalls,
0xa8406ca9: types.TopPeerCategoryForwardUsers,
0xfbeec0f0: types.TopPeerCategoryForwardChats,
0xfb834291: types.TopPeerCategoryPeers,
0xde266ef5: types.contacts.TopPeersNotModified,
0x70b772a8: types.contacts.TopPeers,
0xb52c939d: types.contacts.TopPeersDisabled,
0x1b0c841a: types.DraftMessageEmpty,
0xfd8e711f: types.DraftMessage,
0xc6dc0c66: types.messages.FeaturedStickersNotModified,
0xbe382906: types.messages.FeaturedStickers,
0x0b17f890: types.messages.RecentStickersNotModified,
0x88d37c56: types.messages.RecentStickers,
0x4fcba9c8: types.messages.ArchivedStickers,
0x38641628: types.messages.StickerSetInstallResultSuccess,
0x35e410a8: types.messages.StickerSetInstallResultArchive,
0x6410a5d2: types.StickerSetCovered,
0x3407e51b: types.StickerSetMultiCovered,
0x40d13c0e: types.StickerSetFullCovered,
0x77b15d1c: types.StickerSetNoCovered,
0xaed6dbb2: types.MaskCoords,
0x4a992157: types.InputStickeredMediaPhoto,
0x0438865b: types.InputStickeredMediaDocument,
0xbdf9653b: types.Game,
0x032c3e77: types.InputGameID,
0xc331e80a: types.InputGameShortName,
0x73a379eb: types.HighScore,
0x9a3bfd99: types.messages.HighScores,
0xdc3d824f: types.TextEmpty,
0x744694e0: types.TextPlain,
0x6724abc4: types.TextBold,
0xd912a59c: types.TextItalic,
0xc12622c4: types.TextUnderline,
0x9bf8bb95: types.TextStrike,
0x6c3f19b9: types.TextFixed,
0x3c2884c1: types.TextUrl,
0xde5a0dd6: types.TextEmail,
0x7e6260d7: types.TextConcat,
0xed6a8504: types.TextSubscript,
0xc7fb5e01: types.TextSuperscript,
0x034b8621: types.TextMarked,
0x1ccb966a: types.TextPhone,
0x081ccf4f: types.TextImage,
0x35553762: types.TextAnchor,
0x13567e8a: types.PageBlockUnsupported,
0x70abc3fd: types.PageBlockTitle,
0x8ffa9a1f: types.PageBlockSubtitle,
0xbaafe5e0: types.PageBlockAuthorDate,
0xbfd064ec: types.PageBlockHeader,
0xf12bb6e1: types.PageBlockSubheader,
0x467a0766: types.PageBlockParagraph,
0xc070d93e: types.PageBlockPreformatted,
0x48870999: types.PageBlockFooter,
0xdb20b188: types.PageBlockDivider,
0xce0d37b0: types.PageBlockAnchor,
0xe4e88011: types.PageBlockList,
0x263d7c26: types.PageBlockBlockquote,
0x4f4456d3: types.PageBlockPullquote,
0x1759c560: types.PageBlockPhoto,
0x7c8fe7b6: types.PageBlockVideo,
0x39f23300: types.PageBlockCover,
0xa8718dc5: types.PageBlockEmbed,
0xf259a80b: types.PageBlockEmbedPost,
0x65a0fa4d: types.PageBlockCollage,
0x031f9590: types.PageBlockSlideshow,
0xef1751b5: types.PageBlockChannel,
0x804361ea: types.PageBlockAudio,
0x1e148390: types.PageBlockKicker,
0xbf4dea82: types.PageBlockTable,
0x9a8ae1e1: types.PageBlockOrderedList,
0x76768bed: types.PageBlockDetails,
0x16115a96: types.PageBlockRelatedArticles,
0xa44f3ef6: types.PageBlockMap,
0x85e42301: types.PhoneCallDiscardReasonMissed,
0xe095c1a0: types.PhoneCallDiscardReasonDisconnect,
0x57adc690: types.PhoneCallDiscardReasonHangup,
0xfaf7e8c9: types.PhoneCallDiscardReasonBusy,
0x7d748d04: types.DataJSON,
0xcb296bf8: types.LabeledPrice,
0x3e85a91b: types.Invoice,
0xea02c27e: types.PaymentCharge,
0x1e8caaeb: types.PostAddress,
0x909c3f94: types.PaymentRequestedInfo,
0xcdc27a1f: types.PaymentSavedCredentialsCard,
0x1c570ed1: types.WebDocument,
0xf9c8bcc6: types.WebDocumentNoProxy,
0x9bed434d: types.InputWebDocument,
0xc239d686: types.InputWebFileLocation,
0x9f2221c9: types.InputWebFileGeoPointLocation,
0xf46fe924: types.InputWebFileAudioAlbumThumbLocation,
0x21e753bc: types.upload.WebFile,
0xa0058751: types.payments.PaymentForm,
0xd1451883: types.payments.ValidatedRequestedInfo,
0x4e5f810d: types.payments.PaymentResult,
0xd8411139: types.payments.PaymentVerificationNeeded,
0x70c4fe03: types.payments.PaymentReceipt,
0xfb8fe43c: types.payments.SavedInfo,
0xc10eb2cf: types.InputPaymentCredentialsSaved,
0x3417d728: types.InputPaymentCredentials,
0x0aa1c39f: types.InputPaymentCredentialsApplePay,
0x8ac32801: types.InputPaymentCredentialsGooglePay,
0xdb64fd34: types.account.TmpPassword,
0xb6213cdf: types.ShippingOption,
0x32da9e9c: types.InputStickerSetItem,
0x1e36fded: types.InputPhoneCall,
0x5366c915: types.PhoneCallEmpty,
0xc5226f17: types.PhoneCallWaiting,
0x14b0ed0c: types.PhoneCallRequested,
0x3660c311: types.PhoneCallAccepted,
0x967f7c67: types.PhoneCall,
0x50ca4de1: types.PhoneCallDiscarded,
0x9cc123c7: types.PhoneConnection,
0x635fe375: types.PhoneConnectionWebrtc,
0xfc878fc8: types.PhoneCallProtocol,
0xec82e140: types.phone.PhoneCall,
0xeea8e46e: types.upload.CdnFileReuploadNeeded,
0xa99fca4f: types.upload.CdnFile,
0xc982eaba: types.CdnPublicKey,
0x5725e40a: types.CdnConfig,
0xcad181f6: types.LangPackString,
0x6c47ac9f: types.LangPackStringPluralized,
0x2979eeb2: types.LangPackStringDeleted,
0xf385c1f6: types.LangPackDifference,
0xeeca5ce3: types.LangPackLanguage,
0xe6dfb825: types.ChannelAdminLogEventActionChangeTitle,
0x55188a2e: types.ChannelAdminLogEventActionChangeAbout,
0x6a4afc38: types.ChannelAdminLogEventActionChangeUsername,
0x434bd2af: types.ChannelAdminLogEventActionChangePhoto,
0x1b7907ae: types.ChannelAdminLogEventActionToggleInvites,
0x26ae0971: types.ChannelAdminLogEventActionToggleSignatures,
0xe9e82c18: types.ChannelAdminLogEventActionUpdatePinned,
0x709b2405: types.ChannelAdminLogEventActionEditMessage,
0x42e047bb: types.ChannelAdminLogEventActionDeleteMessage,
0x183040d3: types.ChannelAdminLogEventActionParticipantJoin,
0xf89777f2: types.ChannelAdminLogEventActionParticipantLeave,
0xe31c34d8: types.ChannelAdminLogEventActionParticipantInvite,
0xe6d83d7e: types.ChannelAdminLogEventActionParticipantToggleBan,
0xd5676710: types.ChannelAdminLogEventActionParticipantToggleAdmin,
0xb1c3caa7: types.ChannelAdminLogEventActionChangeStickerSet,
0x5f5c95f1: types.ChannelAdminLogEventActionTogglePreHistoryHidden,
0x2df5fc0a: types.ChannelAdminLogEventActionDefaultBannedRights,
0x8f079643: types.ChannelAdminLogEventActionStopPoll,
0x050c7ac8: types.ChannelAdminLogEventActionChangeLinkedChat,
0x0e6b76ae: types.ChannelAdminLogEventActionChangeLocation,
0x53909779: types.ChannelAdminLogEventActionToggleSlowMode,
0x23209745: types.ChannelAdminLogEventActionStartGroupCall,
0xdb9f9140: types.ChannelAdminLogEventActionDiscardGroupCall,
0xf92424d2: types.ChannelAdminLogEventActionParticipantMute,
0xe64429c0: types.ChannelAdminLogEventActionParticipantUnmute,
0x56d6a247: types.ChannelAdminLogEventActionToggleGroupCallSetting,
0xfe9fc158: types.ChannelAdminLogEventActionParticipantJoinByInvite,
0x5a50fca4: types.ChannelAdminLogEventActionExportedInviteDelete,
0x410a134e: types.ChannelAdminLogEventActionExportedInviteRevoke,
0xe90ebb59: types.ChannelAdminLogEventActionExportedInviteEdit,
0x3e7f6847: types.ChannelAdminLogEventActionParticipantVolume,
0x6e941a38: types.ChannelAdminLogEventActionChangeHistoryTTL,
0xafb6144a: types.ChannelAdminLogEventActionParticipantJoinByRequest,
0xcb2ac766: types.ChannelAdminLogEventActionToggleNoForwards,
0x278f2868: types.ChannelAdminLogEventActionSendMessage,
0xbe4e0ef8: types.ChannelAdminLogEventActionChangeAvailableReactions,
0xf04fb3a9: types.ChannelAdminLogEventActionChangeUsernames,
0x02cc6383: types.ChannelAdminLogEventActionToggleForum,
0x58707d28: types.ChannelAdminLogEventActionCreateTopic,
0xf06fe208: types.ChannelAdminLogEventActionEditTopic,
0xae168909: types.ChannelAdminLogEventActionDeleteTopic,
0x5d8d353b: types.ChannelAdminLogEventActionPinTopic,
0x64f36dfc: types.ChannelAdminLogEventActionToggleAntiSpam,
0x1fad68cd: types.ChannelAdminLogEvent,
0xed8af74d: types.channels.AdminLogResults,
0xea107ae4: types.ChannelAdminLogEventsFilter,
0x5ce14175: types.PopularContact,
0x9e8fa6d3: types.messages.FavedStickersNotModified,
0x2cb51097: types.messages.FavedStickers,
0x46e1d13d: types.RecentMeUrlUnknown,
0xb92c09e2: types.RecentMeUrlUser,
0xb2da71d2: types.RecentMeUrlChat,
0xeb49081d: types.RecentMeUrlChatInvite,
0xbc0a57dc: types.RecentMeUrlStickerSet,
0x0e0310d7: types.help.RecentMeUrls,
0x1cc6e91f: types.InputSingleMedia,
0xa6f8f452: types.WebAuthorization,
0xed56c9fc: types.account.WebAuthorizations,
0xa676a322: types.InputMessageID,
0xbad88395: types.InputMessageReplyTo,
0x86872538: types.InputMessagePinned,
0xacfa1a7e: types.InputMessageCallbackQuery,
0xfcaafeb7: types.InputDialogPeer,
0x64600527: types.InputDialogPeerFolder,
0xe56dbf05: types.DialogPeer,
0x514519e2: types.DialogPeerFolder,
0x0d54b65d: types.messages.FoundStickerSetsNotModified,
0x8af09dd2: types.messages.FoundStickerSets,
0xf39b035c: types.FileHash,
0x75588b3f: types.InputClientProxy,
0xe3309f7f: types.help.TermsOfServiceUpdateEmpty,
0x28ecf961: types.help.TermsOfServiceUpdate,
0x3334b0f0: types.InputSecureFileUploaded,
0x5367e5be: types.InputSecureFile,
0x64199744: types.SecureFileEmpty,
0x7d09c27e: types.SecureFile,
0x8aeabec3: types.SecureData,
0x7d6099dd: types.SecurePlainPhone,
0x21ec5a5f: types.SecurePlainEmail,
0x9d2a81e3: types.SecureValueTypePersonalDetails,
0x3dac6a00: types.SecureValueTypePassport,
0x06e425c4: types.SecureValueTypeDriverLicense,
0xa0d0744b: types.SecureValueTypeIdentityCard,
0x99a48f23: types.SecureValueTypeInternalPassport,
0xcbe31e26: types.SecureValueTypeAddress,
0xfc36954e: types.SecureValueTypeUtilityBill,
0x89137c0d: types.SecureValueTypeBankStatement,
0x8b883488: types.SecureValueTypeRentalAgreement,
0x99e3806a: types.SecureValueTypePassportRegistration,
0xea02ec33: types.SecureValueTypeTemporaryRegistration,
0xb320aadb: types.SecureValueTypePhone,
0x8e3ca7ee: types.SecureValueTypeEmail,
0x187fa0ca: types.SecureValue,
0xdb21d0a7: types.InputSecureValue,
0xed1ecdb0: types.SecureValueHash,
0xe8a40bd9: types.SecureValueErrorData,
0x00be3dfa: types.SecureValueErrorFrontSide,
0x868a2aa5: types.SecureValueErrorReverseSide,
0xe537ced6: types.SecureValueErrorSelfie,
0x7a700873: types.SecureValueErrorFile,
0x666220e9: types.SecureValueErrorFiles,
0x869d758f: types.SecureValueError,
0xa1144770: types.SecureValueErrorTranslationFile,
0x34636dd8: types.SecureValueErrorTranslationFiles,
0x33f0ea47: types.SecureCredentialsEncrypted,
0xad2e1cd8: types.account.AuthorizationForm,
0x811f854f: types.account.SentEmailCode,
0x66afa166: types.help.DeepLinkInfoEmpty,
0x6a4ee832: types.help.DeepLinkInfo,
0x1142bd56: types.SavedPhoneContact,
0x4dba4501: types.account.Takeout,
0xd45ab096: types.PasswordKdfAlgoUnknown,
0x3a912d4a: types.PasswordKdfAlgoSHA256SHA256PBKDF2HMACSHA512iter100000SHA256ModPow,
0x004a8537: types.SecurePasswordKdfAlgoUnknown,
0xbbf2dda0: types.SecurePasswordKdfAlgoPBKDF2HMACSHA512iter100000,
0x86471d92: types.SecurePasswordKdfAlgoSHA512,
0x1527bcac: types.SecureSecretSettings,
0x9880f658: types.InputCheckPasswordEmpty,
0xd27ff082: types.InputCheckPasswordSRP,
0x829d99da: types.SecureRequiredType,
0x027477b4: types.SecureRequiredTypeOneOf,
0xbfb9f457: types.help.PassportConfigNotModified,
0xa098d6af: types.help.PassportConfig,
0x1d1b1245: types.InputAppEvent,
0xc0de1bd9: types.JsonObjectValue,
0x3f6d7b68: types.JsonNull,
0xc7345e6a: types.JsonBool,
0x2be0dfa4: types.JsonNumber,
0xb71e767a: types.JsonString,
0xf7444763: types.JsonArray,
0x99c1d49d: types.JsonObject,
0x34566b6a: types.PageTableCell,
0xe0c0c5e5: types.PageTableRow,
0x6f747657: types.PageCaption,
0xb92fb6cd: types.PageListItemText,
0x25e073fc: types.PageListItemBlocks,
0x5e068047: types.PageListOrderedItemText,
0x98dd8936: types.PageListOrderedItemBlocks,
0xb390dc08: types.PageRelatedArticle,
0x98657f0d: types.Page,
0x8c05f1c9: types.help.SupportName,
0xf3ae2eed: types.help.UserInfoEmpty,
0x01eb3758: types.help.UserInfo,
0x6ca9c2e9: types.PollAnswer,
0x86e18161: types.Poll,
0x3b6ddad2: types.PollAnswerVoters,
0xdcb82ea3: types.PollResults,
0xf041e250: types.ChatOnlines,
0x47a971e0: types.StatsURL,
0x5fb224d5: types.ChatAdminRights,
0x9f120418: types.ChatBannedRights,
0xe630b979: types.InputWallPaper,
0x72091c80: types.InputWallPaperSlug,
0x967a462e: types.InputWallPaperNoFile,
0x1c199183: types.account.WallPapersNotModified,
0xcdc3858c: types.account.WallPapers,
0xad253d78: types.CodeSettings,
0x1dc1bca4: types.WallPaperSettings,
0x8efab953: types.AutoDownloadSettings,
0x63cacf26: types.account.AutoDownloadSettings,
0xd5b3b9f9: types.EmojiKeyword,
0x236df622: types.EmojiKeywordDeleted,
0x5cc761bd: types.EmojiKeywordsDifference,
0xa575739d: types.EmojiURL,
0xb3fb5361: types.EmojiLanguage,
0xff544e65: types.Folder,
0xfbd2c296: types.InputFolderPeer,
0xe9baa668: types.FolderPeer,
0xe844ebff: types.messages.SearchCounter,
0x92d33a0e: types.UrlAuthResultRequest,
0x8f8c0e4e: types.UrlAuthResultAccepted,
0xa9d6db1f: types.UrlAuthResultDefault,
0xbfb5ad8b: types.ChannelLocationEmpty,
0x209b82db: types.ChannelLocation,
0xca461b5d: types.PeerLocated,
0xf8ec284b: types.PeerSelfLocated,
0xd072acb4: types.RestrictionReason,
0x3c5693e9: types.InputTheme,
0xf5890df1: types.InputThemeSlug,
0xa00e67d6: types.Theme,
0xf41eb622: types.account.ThemesNotModified,
0x9a3d8c6d: types.account.Themes,
0x629f1980: types.auth.LoginToken,
0x068e9916: types.auth.LoginTokenMigrateTo,
0x390d5c5e: types.auth.LoginTokenSuccess,
0x57e28221: types.account.ContentSettings,
0xa927fec5: types.messages.InactiveChats,
0xc3a12462: types.BaseThemeClassic,
0xfbd81688: types.BaseThemeDay,
0xb7b31ea8: types.BaseThemeNight,
0x6d5f77ee: types.BaseThemeTinted,
0x5b11125a: types.BaseThemeArctic,
0x8fde504f: types.InputThemeSettings,
0xfa58b6d4: types.ThemeSettings,
0x54b56617: types.WebPageAttributeTheme,
0x34d247b4: types.MessageUserVote,
0x3ca5b0ec: types.MessageUserVoteInputOption,
0x8a65e557: types.MessageUserVoteMultiple,
0x0823f649: types.messages.VotesList,
0xf568028a: types.BankCardOpenUrl,
0x3e24e573: types.payments.BankCardData,
0x7438f7e8: types.DialogFilter,
0x363293ae: types.DialogFilterDefault,
0xd64a04a8: types.DialogFilterChatlist,
0x77744d4a: types.DialogFilterSuggested,
0xb637edaf: types.StatsDateRangeDays,
0xcb43acde: types.StatsAbsValueAndPrev,
0xcbce2fe0: types.StatsPercentValue,
0x4a27eb2d: types.StatsGraphAsync,
0xbedc9822: types.StatsGraphError,
0x8ea464b6: types.StatsGraph,
0xad4fc9bd: types.MessageInteractionCounters,
0xbdf78394: types.stats.BroadcastStats,
0x98f6ac75: types.help.PromoDataEmpty,
0x8c39793f: types.help.PromoData,
0xde33b094: types.VideoSize,
0xf85c413c: types.VideoSizeEmojiMarkup,
0x0da082fe: types.VideoSizeStickerMarkup,
0x9d04af9b: types.StatsGroupTopPoster,
0xd7584c87: types.StatsGroupTopAdmin,
0x535f779d: types.StatsGroupTopInviter,
0xef7ff916: types.stats.MegagroupStats,
0xbea2f424: types.GlobalPrivacySettings,
0x4203c5ef: types.help.CountryCode,
0xc3878e23: types.help.Country,
0x93cc1f32: types.help.CountriesListNotModified,
0x87d0759e: types.help.CountriesList,
0x455b853d: types.MessageViews,
0xb6c4f543: types.messages.MessageViews,
0xa6341782: types.messages.DiscussionMessage,
0xa6d57763: types.MessageReplyHeader,
0x83d60fc2: types.MessageReplies,
0xe8fd8014: types.PeerBlocked,
0x8999f295: types.stats.MessageStats,
0x7780bcb4: types.GroupCallDiscarded,
0xd597650c: types.GroupCall,
0xd8aa840f: types.InputGroupCall,
0xeba636fe: types.GroupCallParticipant,
0x9e727aad: types.phone.GroupCall,
0xf47751b6: types.phone.GroupParticipants,
0x3081ed9d: types.InlineQueryPeerTypeSameBotPM,
0x833c0fac: types.InlineQueryPeerTypePM,
0xd766c50a: types.InlineQueryPeerTypeChat,
0x5ec4be43: types.InlineQueryPeerTypeMegagroup,
0x6334ee9a: types.InlineQueryPeerTypeBroadcast,
0x0e3b2d0c: types.InlineQueryPeerTypeBotPM,
0x1662af0b: types.messages.HistoryImport,
0x5e0fb7b9: types.messages.HistoryImportParsed,
0xef8d3e6c: types.messages.AffectedFoundMessages,
0x8c5adfd9: types.ChatInviteImporter,
0xbdc62dcc: types.messages.ExportedChatInvites,
0x1871be50: types.messages.ExportedChatInvite,
0x222600ef: types.messages.ExportedChatInviteReplaced,
0x81b6b00a: types.messages.ChatInviteImporters,
0xf2ecef23: types.ChatAdminWithInvites,
0xb69b72d7: types.messages.ChatAdminsWithInvites,
0xa24de717: types.messages.CheckedHistoryImportPeer,
0xafe5623f: types.phone.JoinAsPeers,
0x204bd158: types.phone.ExportedGroupCallInvite,
0xdcb118b7: types.GroupCallParticipantVideoSourceGroup,
0x67753ac8: types.GroupCallParticipantVideo,
0x85fea03f: types.stickers.SuggestedShortName,
0x2f6cb2ab: types.BotCommandScopeDefault,
0x3c4f04d8: types.BotCommandScopeUsers,
0x6fe1a881: types.BotCommandScopeChats,
0xb9aa606a: types.BotCommandScopeChatAdmins,
0xdb9d897d: types.BotCommandScopePeer,
0x3fd863d1: types.BotCommandScopePeerAdmins,
0x0a1321f3: types.BotCommandScopePeerUser,
0xe3779861: types.account.ResetPasswordFailedWait,
0xe9effc7d: types.account.ResetPasswordRequestedWait,
0xe926d63e: types.account.ResetPasswordOk,
0xfc25b828: types.SponsoredMessage,
0xc9ee1d87: types.messages.SponsoredMessages,
0x1839490f: types.messages.SponsoredMessagesEmpty,
0xc9b0539f: types.SearchResultsCalendarPeriod,
0x147ee23c: types.messages.SearchResultsCalendar,
0x7f648b67: types.SearchResultPosition,
0x53b22baf: types.messages.SearchResultsPositions,
0xf496b0c6: types.channels.SendAsPeers,
0x3b6d152e: types.users.UserFull,
0x6880b94d: types.messages.PeerSettings,
0xc3a2835f: types.auth.LoggedOut,
0xa3d1cb80: types.ReactionCount,
0x4f2b9479: types.MessageReactions,
0x31bd492d: types.messages.MessageReactionsList,
0xc077ec01: types.AvailableReaction,
0x9f071957: types.messages.AvailableReactionsNotModified,
0x768e3aad: types.messages.AvailableReactions,
0x8c79b63c: types.MessagePeerReaction,
0x80eb48af: types.GroupCallStreamChannel,
0xd0e482b2: types.phone.GroupCallStreamChannels,
0x2dbf3432: types.phone.GroupCallStreamRtmpUrl,
0x4576f3f0: types.AttachMenuBotIconColor,
0xb2a7386b: types.AttachMenuBotIcon,
0xc8aa2cd2: types.AttachMenuBot,
0xf1d88a5c: types.AttachMenuBotsNotModified,
0x3c4301c0: types.AttachMenuBots,
0x93bf667f: types.AttachMenuBotsBot,
0x0c14557c: types.WebViewResultUrl,
0x882f76bb: types.SimpleWebViewResultUrl,
0x0c94511c: types.WebViewMessageSent,
0x7533a588: types.BotMenuButtonDefault,
0x4258c205: types.BotMenuButtonCommands,
0xc7b57ce6: types.BotMenuButton,
0xfbf6e8b1: types.account.SavedRingtonesNotModified,
0xc1e92cc5: types.account.SavedRingtones,
0x97e8bebe: types.NotificationSoundDefault,
0x6f0c34df: types.NotificationSoundNone,
0x830b9ae4: types.NotificationSoundLocal,
0xff6c8049: types.NotificationSoundRingtone,
0xb7263f6d: types.account.SavedRingtone,
0x1f307eb7: types.account.SavedRingtoneConverted,
0x7d6be90e: types.AttachMenuPeerTypeSameBotPM,
0xc32bfa1a: types.AttachMenuPeerTypeBotPM,
0xf146d31f: types.AttachMenuPeerTypePM,
0x0509113f: types.AttachMenuPeerTypeChat,
0x7bfbdefc: types.AttachMenuPeerTypeBroadcast,
0xc5b56859: types.InputInvoiceMessage,
0xc326caef: types.InputInvoiceSlug,
0xaed0cbd9: types.payments.ExportedInvoice,
0x93752c52: types.messages.TranscribedAudio,
0x5334759c: types.help.PremiumPromo,
0xa6751e66: types.InputStorePaymentPremiumSubscription,
0x616f7fe8: types.InputStorePaymentGiftPremium,
0x74c34319: types.PremiumGiftOption,
0x88f8f21b: types.PaymentFormMethod,
0x2de11aae: types.EmojiStatusEmpty,
0x929b619d: types.EmojiStatus,
0xfa30a8c7: types.EmojiStatusUntil,
0xd08ce645: types.account.EmojiStatusesNotModified,
0x90c467d1: types.account.EmojiStatuses,
0x79f5d419: types.ReactionEmpty,
0x1b2286b8: types.ReactionEmoji,
0x8935fc73: types.ReactionCustomEmoji,
0xeafc32bc: types.ChatReactionsNone,
0x52928bca: types.ChatReactionsAll,
0x661d4037: types.ChatReactionsSome,
0xb06fdbdf: types.messages.ReactionsNotModified,
0xeafdf716: types.messages.Reactions,
0x4345be73: types.EmailVerifyPurposeLoginSetup,
0x527d22eb: types.EmailVerifyPurposeLoginChange,
0xbbf51685: types.EmailVerifyPurposePassport,
0x922e55a9: types.EmailVerificationCode,
0xdb909ec2: types.EmailVerificationGoogle,
0x96d074fd: types.EmailVerificationApple,
0x2b96cd1b: types.account.EmailVerified,
0xe1bb0d61: types.account.EmailVerifiedLogin,
0x5f2d1df2: types.PremiumSubscriptionOption,
0xb81c7034: types.SendAsPeer,
0xad628cc8: types.MessageExtendedMediaPreview,
0xee479c64: types.MessageExtendedMedia,
0xfcfeb29c: types.StickerKeyword,
0xb4073647: types.Username,
0x023f109b: types.ForumTopicDeleted,
0x71701da9: types.ForumTopic,
0x367617d3: types.messages.ForumTopics,
0x43b46b20: types.DefaultHistoryTTL,
0x41bf109b: types.ExportedContactToken,
0x5f3b8a00: types.RequestPeerTypeUser,
0xc9f06e1b: types.RequestPeerTypeChat,
0x339bef6c: types.RequestPeerTypeBroadcast,
0x481eadfa: types.EmojiListNotModified,
0x7a1e11d1: types.EmojiList,
0x7a9abda9: types.EmojiGroup,
0x6fb4ad87: types.messages.EmojiGroupsNotModified,
0x881fb94b: types.messages.EmojiGroups,
0x751f3146: types.TextWithEntities,
0x33db32f8: types.messages.TranslateResult,
0xc84834ce: types.AutoSaveSettings,
0x81602d47: types.AutoSaveException,
0x4c3e069d: types.account.AutoSaveSettings,
0x7cde641d: types.help.AppConfigNotModified,
0xdd18782e: types.help.AppConfig,
0xa920bd7a: types.InputBotAppID,
0x908c0407: types.InputBotAppShortName,
0x5da674b7: types.BotAppNotModified,
0x95fcd1d6: types.BotApp,
0xeb50adf5: types.messages.BotApp,
0x3c1b4f0d: types.AppWebViewResultUrl,
0xb57295d5: types.InlineBotWebView,
0x4a4ff172: types.ReadParticipantDate,
0xf3e0da33: types.InputChatlistDialogFilter,
0x0c5181ac: types.ExportedChatlistInvite,
0x10e6e3a6: types.chatlists.ExportedChatlistInvite,
0x10ab6dc7: types.chatlists.ExportedInvites,
0xfa87f659: types.chatlists.ChatlistInviteAlready,
0x1dcd839d: types.chatlists.ChatlistInvite,
0x93bd878d: types.chatlists.ChatlistUpdates,
0xe8a775b0: types.bots.BotInfo,
0xcb9f372d: functions.InvokeAfterMsgRequest,
0x3dc4b4f0: functions.InvokeAfterMsgsRequest,
0xc1cd5ea9: functions.InitConnectionRequest,
0xda9b0d0d: functions.InvokeWithLayerRequest,
0xbf9459b7: functions.InvokeWithoutUpdatesRequest,
0x365275f2: functions.InvokeWithMessagesRangeRequest,
0xaca9fd2e: functions.InvokeWithTakeoutRequest,
0xa677244f: functions.auth.SendCodeRequest,
0x80eee427: functions.auth.SignUpRequest,
0x8d52a951: functions.auth.SignInRequest,
0x3e72ba19: functions.auth.LogOutRequest,
0x9fab0d1a: functions.auth.ResetAuthorizationsRequest,
0xe5bfffcd: functions.auth.ExportAuthorizationRequest,
0xa57a7dad: functions.auth.ImportAuthorizationRequest,
0xcdd42a05: functions.auth.BindTempAuthKeyRequest,
0x67a3ff2c: functions.auth.ImportBotAuthorizationRequest,
0xd18b4d16: functions.auth.CheckPasswordRequest,
0xd897bc66: functions.auth.RequestPasswordRecoveryRequest,
0x37096c70: functions.auth.RecoverPasswordRequest,
0x3ef1a9bf: functions.auth.ResendCodeRequest,
0x1f040578: functions.auth.CancelCodeRequest,
0x8e48a188: functions.auth.DropTempAuthKeysRequest,
0xb7e085fe: functions.auth.ExportLoginTokenRequest,
0x95ac5ce4: functions.auth.ImportLoginTokenRequest,
0xe894ad4d: functions.auth.AcceptLoginTokenRequest,
0x0d36bf79: functions.auth.CheckRecoveryPasswordRequest,
0x2db873a9: functions.auth.ImportWebTokenAuthorizationRequest,
0x89464b50: functions.auth.RequestFirebaseSmsRequest,
0x7e960193: functions.auth.ResetLoginEmailRequest,
0xec86017a: functions.account.RegisterDeviceRequest,
0x6a0d3206: functions.account.UnregisterDeviceRequest,
0x84be5b93: functions.account.UpdateNotifySettingsRequest,
0x12b3ad31: functions.account.GetNotifySettingsRequest,
0xdb7e1747: functions.account.ResetNotifySettingsRequest,
0x78515775: functions.account.UpdateProfileRequest,
0x6628562c: functions.account.UpdateStatusRequest,
0x07967d36: functions.account.GetWallPapersRequest,
0xc5ba3d86: functions.account.ReportPeerRequest,
0x2714d86c: functions.account.CheckUsernameRequest,
0x3e0bdd7c: functions.account.UpdateUsernameRequest,
0xdadbc950: functions.account.GetPrivacyRequest,
0xc9f81ce8: functions.account.SetPrivacyRequest,
0xa2c0cf74: functions.account.DeleteAccountRequest,
0x08fc711d: functions.account.GetAccountTTLRequest,
0x2442485e: functions.account.SetAccountTTLRequest,
0x82574ae5: functions.account.SendChangePhoneCodeRequest,
0x70c32edb: functions.account.ChangePhoneRequest,
0x38df3532: functions.account.UpdateDeviceLockedRequest,
0xe320c158: functions.account.GetAuthorizationsRequest,
0xdf77f3bc: functions.account.ResetAuthorizationRequest,
0x548a30f5: functions.account.GetPasswordRequest,
0x9cd4eaf9: functions.account.GetPasswordSettingsRequest,
0xa59b102f: functions.account.UpdatePasswordSettingsRequest,
0x1b3faa88: functions.account.SendConfirmPhoneCodeRequest,
0x5f2178c3: functions.account.ConfirmPhoneRequest,
0x449e0b51: functions.account.GetTmpPasswordRequest,
0x182e6d6f: functions.account.GetWebAuthorizationsRequest,
0x2d01b9ef: functions.account.ResetWebAuthorizationRequest,
0x682d2594: functions.account.ResetWebAuthorizationsRequest,
0xb288bc7d: functions.account.GetAllSecureValuesRequest,
0x73665bc2: functions.account.GetSecureValueRequest,
0x899fe31d: functions.account.SaveSecureValueRequest,
0xb880bc4b: functions.account.DeleteSecureValueRequest,
0xa929597a: functions.account.GetAuthorizationFormRequest,
0xf3ed4c73: functions.account.AcceptAuthorizationRequest,
0xa5a356f9: functions.account.SendVerifyPhoneCodeRequest,
0x4dd3a7f6: functions.account.VerifyPhoneRequest,
0x98e037bb: functions.account.SendVerifyEmailCodeRequest,
0x032da4cf: functions.account.VerifyEmailRequest,
0x8ef3eab0: functions.account.InitTakeoutSessionRequest,
0x1d2652ee: functions.account.FinishTakeoutSessionRequest,
0x8fdf1920: functions.account.ConfirmPasswordEmailRequest,
0x7a7f2a15: functions.account.ResendPasswordEmailRequest,
0xc1cbd5b6: functions.account.CancelPasswordEmailRequest,
0x9f07c728: functions.account.GetContactSignUpNotificationRequest,
0xcff43f61: functions.account.SetContactSignUpNotificationRequest,
0x53577479: functions.account.GetNotifyExceptionsRequest,
0xfc8ddbea: functions.account.GetWallPaperRequest,
0xe39a8f03: functions.account.UploadWallPaperRequest,
0x6c5a5b37: functions.account.SaveWallPaperRequest,
0xfeed5769: functions.account.InstallWallPaperRequest,
0xbb3b9804: functions.account.ResetWallPapersRequest,
0x56da0b3f: functions.account.GetAutoDownloadSettingsRequest,
0x76f36233: functions.account.SaveAutoDownloadSettingsRequest,
0x1c3db333: functions.account.UploadThemeRequest,
0x652e4400: functions.account.CreateThemeRequest,
0x2bf40ccc: functions.account.UpdateThemeRequest,
0xf257106c: functions.account.SaveThemeRequest,
0xc727bb3b: functions.account.InstallThemeRequest,
0x3a5869ec: functions.account.GetThemeRequest,
0x7206e458: functions.account.GetThemesRequest,
0xb574b16b: functions.account.SetContentSettingsRequest,
0x8b9b4dae: functions.account.GetContentSettingsRequest,
0x65ad71dc: functions.account.GetMultiWallPapersRequest,
0xeb2b4cf6: functions.account.GetGlobalPrivacySettingsRequest,
0x1edaaac2: functions.account.SetGlobalPrivacySettingsRequest,
0xfa8cc6f5: functions.account.ReportProfilePhotoRequest,
0x9308ce1b: functions.account.ResetPasswordRequest,
0x4c9409f6: functions.account.DeclinePasswordResetRequest,
0xd638de89: functions.account.GetChatThemesRequest,
0xbf899aa0: functions.account.SetAuthorizationTTLRequest,
0x40f48462: functions.account.ChangeAuthorizationSettingsRequest,
0xe1902288: functions.account.GetSavedRingtonesRequest,
0x3dea5b03: functions.account.SaveRingtoneRequest,
0x831a83a2: functions.account.UploadRingtoneRequest,
0xfbd3de6b: functions.account.UpdateEmojiStatusRequest,
0xd6753386: functions.account.GetDefaultEmojiStatusesRequest,
0x0f578105: functions.account.GetRecentEmojiStatusesRequest,
0x18201aae: functions.account.ClearRecentEmojiStatusesRequest,
0xef500eab: functions.account.ReorderUsernamesRequest,
0x58d6b376: functions.account.ToggleUsernameRequest,
0xe2750328: functions.account.GetDefaultProfilePhotoEmojisRequest,
0x915860ae: functions.account.GetDefaultGroupPhotoEmojisRequest,
0xadcbbcda: functions.account.GetAutoSaveSettingsRequest,
0xd69b8361: functions.account.SaveAutoSaveSettingsRequest,
0x53bc0020: functions.account.DeleteAutoSaveExceptionsRequest,
0x0d91a548: functions.users.GetUsersRequest,
0xb60f5918: functions.users.GetFullUserRequest,
0x90c894b5: functions.users.SetSecureValueErrorsRequest,
0x7adc669d: functions.contacts.GetContactIDsRequest,
0xc4a353ee: functions.contacts.GetStatusesRequest,
0x5dd69e12: functions.contacts.GetContactsRequest,
0x2c800be5: functions.contacts.ImportContactsRequest,
0x096a0e00: functions.contacts.DeleteContactsRequest,
0x1013fd9e: functions.contacts.DeleteByPhonesRequest,
0x68cc1411: functions.contacts.BlockRequest,
0xbea65d50: functions.contacts.UnblockRequest,
0xf57c350f: functions.contacts.GetBlockedRequest,
0x11f812d8: functions.contacts.SearchRequest,
0xf93ccba3: functions.contacts.ResolveUsernameRequest,
0x973478b6: functions.contacts.GetTopPeersRequest,
0x1ae373ac: functions.contacts.ResetTopPeerRatingRequest,
0x879537f1: functions.contacts.ResetSavedRequest,
0x82f1e39f: functions.contacts.GetSavedRequest,
0x8514bdda: functions.contacts.ToggleTopPeersRequest,
0xe8f463d0: functions.contacts.AddContactRequest,
0xf831a20f: functions.contacts.AcceptContactRequest,
0xd348bc44: functions.contacts.GetLocatedRequest,
0x29a8962c: functions.contacts.BlockFromRepliesRequest,
0x8af94344: functions.contacts.ResolvePhoneRequest,
0xf8654027: functions.contacts.ExportContactTokenRequest,
0x13005788: functions.contacts.ImportContactTokenRequest,
0x63c66506: functions.messages.GetMessagesRequest,
0xa0f4cb4f: functions.messages.GetDialogsRequest,
0x4423e6c5: functions.messages.GetHistoryRequest,
0xa0fda762: functions.messages.SearchRequest,
0x0e306d3a: functions.messages.ReadHistoryRequest,
0xb08f922a: functions.messages.DeleteHistoryRequest,
0xe58e95d2: functions.messages.DeleteMessagesRequest,
0x05a954c0: functions.messages.ReceivedMessagesRequest,
0x58943ee2: functions.messages.SetTypingRequest,
0x1cc20387: functions.messages.SendMessageRequest,
0x7547c966: functions.messages.SendMediaRequest,
0xc661bbc4: functions.messages.ForwardMessagesRequest,
0xcf1592db: functions.messages.ReportSpamRequest,
0xefd9a6a2: functions.messages.GetPeerSettingsRequest,
0x8953ab4e: functions.messages.ReportRequest,
0x49e9528f: functions.messages.GetChatsRequest,
0xaeb00b34: functions.messages.GetFullChatRequest,
0x73783ffd: functions.messages.EditChatTitleRequest,
0x35ddd674: functions.messages.EditChatPhotoRequest,
0xf24753e3: functions.messages.AddChatUserRequest,
0xa2185cab: functions.messages.DeleteChatUserRequest,
0x0034a818: functions.messages.CreateChatRequest,
0x26cf8950: functions.messages.GetDhConfigRequest,
0xf64daf43: functions.messages.RequestEncryptionRequest,
0x3dbc0415: functions.messages.AcceptEncryptionRequest,
0xf393aea0: functions.messages.DiscardEncryptionRequest,
0x791451ed: functions.messages.SetEncryptedTypingRequest,
0x7f4b690a: functions.messages.ReadEncryptedHistoryRequest,
0x44fa7a15: functions.messages.SendEncryptedRequest,
0x5559481d: functions.messages.SendEncryptedFileRequest,
0x32d439a4: functions.messages.SendEncryptedServiceRequest,
0x55a5bb66: functions.messages.ReceivedQueueRequest,
0x4b0c8c0f: functions.messages.ReportEncryptedSpamRequest,
0x36a73f77: functions.messages.ReadMessageContentsRequest,
0xd5a5d3a1: functions.messages.GetStickersRequest,
0xb8a0a1a8: functions.messages.GetAllStickersRequest,
0x8b68b0cc: functions.messages.GetWebPagePreviewRequest,
0xa02ce5d5: functions.messages.ExportChatInviteRequest,
0x3eadb1bb: functions.messages.CheckChatInviteRequest,
0x6c50051c: functions.messages.ImportChatInviteRequest,
0xc8a0ec74: functions.messages.GetStickerSetRequest,
0xc78fe460: functions.messages.InstallStickerSetRequest,
0xf96e55de: functions.messages.UninstallStickerSetRequest,
0xe6df7378: functions.messages.StartBotRequest,
0x5784d3e1: functions.messages.GetMessagesViewsRequest,
0xa85bd1c2: functions.messages.EditChatAdminRequest,
0xa2875319: functions.messages.MigrateChatRequest,
0x4bc6589a: functions.messages.SearchGlobalRequest,
0x78337739: functions.messages.ReorderStickerSetsRequest,
0xb1f2061f: functions.messages.GetDocumentByHashRequest,
0x5cf09635: functions.messages.GetSavedGifsRequest,
0x327a30cb: functions.messages.SaveGifRequest,
0x514e999d: functions.messages.GetInlineBotResultsRequest,
0xbb12a419: functions.messages.SetInlineBotResultsRequest,
0xd3fbdccb: functions.messages.SendInlineBotResultRequest,
0xfda68d36: functions.messages.GetMessageEditDataRequest,
0x48f71778: functions.messages.EditMessageRequest,
0x83557dba: functions.messages.EditInlineBotMessageRequest,
0x9342ca07: functions.messages.GetBotCallbackAnswerRequest,
0xd58f130a: functions.messages.SetBotCallbackAnswerRequest,
0xe470bcfd: functions.messages.GetPeerDialogsRequest,
0xb4331e3f: functions.messages.SaveDraftRequest,
0x6a3f8d65: functions.messages.GetAllDraftsRequest,
0x64780b14: functions.messages.GetFeaturedStickersRequest,
0x5b118126: functions.messages.ReadFeaturedStickersRequest,
0x9da9403b: functions.messages.GetRecentStickersRequest,
0x392718f8: functions.messages.SaveRecentStickerRequest,
0x8999602d: functions.messages.ClearRecentStickersRequest,
0x57f17692: functions.messages.GetArchivedStickersRequest,
0x640f82b8: functions.messages.GetMaskStickersRequest,
0xcc5b67cc: functions.messages.GetAttachedStickersRequest,
0x8ef8ecc0: functions.messages.SetGameScoreRequest,
0x15ad9f64: functions.messages.SetInlineGameScoreRequest,
0xe822649d: functions.messages.GetGameHighScoresRequest,
0x0f635e1b: functions.messages.GetInlineGameHighScoresRequest,
0xe40ca104: functions.messages.GetCommonChatsRequest,
0x875f74be: functions.messages.GetAllChatsRequest,
0x32ca8f91: functions.messages.GetWebPageRequest,
0xa731e257: functions.messages.ToggleDialogPinRequest,
0x3b1adf37: functions.messages.ReorderPinnedDialogsRequest,
0xd6b94df2: functions.messages.GetPinnedDialogsRequest,
0xe5f672fa: functions.messages.SetBotShippingResultsRequest,
0x09c2dd95: functions.messages.SetBotPrecheckoutResultsRequest,
0x519bc2b1: functions.messages.UploadMediaRequest,
0xc97df020: functions.messages.SendScreenshotNotificationRequest,
0x04f1aaa9: functions.messages.GetFavedStickersRequest,
0xb9ffc55b: functions.messages.FaveStickerRequest,
0xf107e790: functions.messages.GetUnreadMentionsRequest,
0x36e5bf4d: functions.messages.ReadMentionsRequest,
0x702a40e0: functions.messages.GetRecentLocationsRequest,
0xb6f11a1c: functions.messages.SendMultiMediaRequest,
0x5057c497: functions.messages.UploadEncryptedFileRequest,
0x35705b8a: functions.messages.SearchStickerSetsRequest,
0x1cff7e08: functions.messages.GetSplitRangesRequest,
0xc286d98f: functions.messages.MarkDialogUnreadRequest,
0x22e24e22: functions.messages.GetDialogUnreadMarksRequest,
0x7e58ee9c: functions.messages.ClearAllDraftsRequest,
0xd2aaf7ec: functions.messages.UpdatePinnedMessageRequest,
0x10ea6184: functions.messages.SendVoteRequest,
0x73bb643b: functions.messages.GetPollResultsRequest,
0x6e2be050: functions.messages.GetOnlinesRequest,
0xdef60797: functions.messages.EditChatAboutRequest,
0xa5866b41: functions.messages.EditChatDefaultBannedRightsRequest,
0x35a0e062: functions.messages.GetEmojiKeywordsRequest,
0x1508b6af: functions.messages.GetEmojiKeywordsDifferenceRequest,
0x4e9963b2: functions.messages.GetEmojiKeywordsLanguagesRequest,
0xd5b10c26: functions.messages.GetEmojiURLRequest,
0x00ae7cc1: functions.messages.GetSearchCountersRequest,
0x198fb446: functions.messages.RequestUrlAuthRequest,
0xb12c7125: functions.messages.AcceptUrlAuthRequest,
0x4facb138: functions.messages.HidePeerSettingsBarRequest,
0xf516760b: functions.messages.GetScheduledHistoryRequest,
0xbdbb0464: functions.messages.GetScheduledMessagesRequest,
0xbd38850a: functions.messages.SendScheduledMessagesRequest,
0x59ae2b16: functions.messages.DeleteScheduledMessagesRequest,
0xb86e380e: functions.messages.GetPollVotesRequest,
0xb5052fea: functions.messages.ToggleStickerSetsRequest,
0xf19ed96d: functions.messages.GetDialogFiltersRequest,
0xa29cd42c: functions.messages.GetSuggestedDialogFiltersRequest,
0x1ad4a04a: functions.messages.UpdateDialogFilterRequest,
0xc563c1e4: functions.messages.UpdateDialogFiltersOrderRequest,
0x7ed094a1: functions.messages.GetOldFeaturedStickersRequest,
0x22ddd30c: functions.messages.GetRepliesRequest,
0x446972fd: functions.messages.GetDiscussionMessageRequest,
0xf731a9f4: functions.messages.ReadDiscussionRequest,
0xee22b9a8: functions.messages.UnpinAllMessagesRequest,
0x5bd0ee50: functions.messages.DeleteChatRequest,
0xf9cbe409: functions.messages.DeletePhoneCallHistoryRequest,
0x43fe19f3: functions.messages.CheckHistoryImportRequest,
0x34090c3b: functions.messages.InitHistoryImportRequest,
0x2a862092: functions.messages.UploadImportedMediaRequest,
0xb43df344: functions.messages.StartHistoryImportRequest,
0xa2b5a3f6: functions.messages.GetExportedChatInvitesRequest,
0x73746f5c: functions.messages.GetExportedChatInviteRequest,
0xbdca2f75: functions.messages.EditExportedChatInviteRequest,
0x56987bd5: functions.messages.DeleteRevokedExportedChatInvitesRequest,
0xd464a42b: functions.messages.DeleteExportedChatInviteRequest,
0x3920e6ef: functions.messages.GetAdminsWithInvitesRequest,
0xdf04dd4e: functions.messages.GetChatInviteImportersRequest,
0xb80e5fe4: functions.messages.SetHistoryTTLRequest,
0x5dc60f03: functions.messages.CheckHistoryImportPeerRequest,
0xe63be13f: functions.messages.SetChatThemeRequest,
0x31c1c44f: functions.messages.GetMessageReadParticipantsRequest,
0x49f0bde9: functions.messages.GetSearchResultsCalendarRequest,
0x6e9583a3: functions.messages.GetSearchResultsPositionsRequest,
0x7fe7e815: functions.messages.HideChatJoinRequestRequest,
0xe085f4ea: functions.messages.HideAllChatJoinRequestsRequest,
0xb11eafa2: functions.messages.ToggleNoForwardsRequest,
0xccfddf96: functions.messages.SaveDefaultSendAsRequest,
0xd30d78d4: functions.messages.SendReactionRequest,
0x8bba90e6: functions.messages.GetMessagesReactionsRequest,
0x461b3f48: functions.messages.GetMessageReactionsListRequest,
0xfeb16771: functions.messages.SetChatAvailableReactionsRequest,
0x18dea0ac: functions.messages.GetAvailableReactionsRequest,
0x4f47a016: functions.messages.SetDefaultReactionRequest,
0x63183030: functions.messages.TranslateTextRequest,
0x3223495b: functions.messages.GetUnreadReactionsRequest,
0x54aa7f8e: functions.messages.ReadReactionsRequest,
0x107e31a0: functions.messages.SearchSentMediaRequest,
0x16fcc2cb: functions.messages.GetAttachMenuBotsRequest,
0x77216192: functions.messages.GetAttachMenuBotRequest,
0x69f59d69: functions.messages.ToggleBotInAttachMenuRequest,
0x178b480b: functions.messages.RequestWebViewRequest,
0x7ff34309: functions.messages.ProlongWebViewRequest,
0x299bec8e: functions.messages.RequestSimpleWebViewRequest,
0x0a4314f5: functions.messages.SendWebViewResultMessageRequest,
0xdc0242c8: functions.messages.SendWebViewDataRequest,
0x269e9a49: functions.messages.TranscribeAudioRequest,
0x7f1d072f: functions.messages.RateTranscribedAudioRequest,
0xd9ab0f54: functions.messages.GetCustomEmojiDocumentsRequest,
0xfbfca18f: functions.messages.GetEmojiStickersRequest,
0x0ecf6736: functions.messages.GetFeaturedEmojiStickersRequest,
0x3f64c076: functions.messages.ReportReactionRequest,
0xbb8125ba: functions.messages.GetTopReactionsRequest,
0x39461db2: functions.messages.GetRecentReactionsRequest,
0x9dfeefb4: functions.messages.ClearRecentReactionsRequest,
0x84f80814: functions.messages.GetExtendedMediaRequest,
0x9eb51445: functions.messages.SetDefaultHistoryTTLRequest,
0x658b7188: functions.messages.GetDefaultHistoryTTLRequest,
0xfe38d01b: functions.messages.SendBotRequestedPeerRequest,
0x7488ce5b: functions.messages.GetEmojiGroupsRequest,
0x2ecd56cd: functions.messages.GetEmojiStatusGroupsRequest,
0x21a548f3: functions.messages.GetEmojiProfilePhotoGroupsRequest,
0x2c11c0d7: functions.messages.SearchCustomEmojiRequest,
0xe47cb579: functions.messages.TogglePeerTranslationsRequest,
0x34fdc5c3: functions.messages.GetBotAppRequest,
0x8c5a3b3c: functions.messages.RequestAppWebViewRequest,
0x8ffacae1: functions.messages.SetChatWallPaperRequest,
0xedd4882a: functions.updates.GetStateRequest,
0x25939651: functions.updates.GetDifferenceRequest,
0x03173d78: functions.updates.GetChannelDifferenceRequest,
0x09e82039: functions.photos.UpdateProfilePhotoRequest,
0x0388a3b5: functions.photos.UploadProfilePhotoRequest,
0x87cf7f2f: functions.photos.DeletePhotosRequest,
0x91cd32a8: functions.photos.GetUserPhotosRequest,
0xe14c4a71: functions.photos.UploadContactProfilePhotoRequest,
0xb304a621: functions.upload.SaveFilePartRequest,
0xbe5335be: functions.upload.GetFileRequest,
0xde7b673d: functions.upload.SaveBigFilePartRequest,
0x24e6818d: functions.upload.GetWebFileRequest,
0x395f69da: functions.upload.GetCdnFileRequest,
0x9b2754a8: functions.upload.ReuploadCdnFileRequest,
0x91dc3f31: functions.upload.GetCdnFileHashesRequest,
0x9156982a: functions.upload.GetFileHashesRequest,
0xc4f9186b: functions.help.GetConfigRequest,
0x1fb33026: functions.help.GetNearestDcRequest,
0x522d5a7d: functions.help.GetAppUpdateRequest,
0x4d392343: functions.help.GetInviteTextRequest,
0x9cdf08cd: functions.help.GetSupportRequest,
0x9010ef6f: functions.help.GetAppChangelogRequest,
0xec22cfcd: functions.help.SetBotUpdatesStatusRequest,
0x52029342: functions.help.GetCdnConfigRequest,
0x3dc0f114: functions.help.GetRecentMeUrlsRequest,
0x2ca51fd1: functions.help.GetTermsOfServiceUpdateRequest,
0xee72f79a: functions.help.AcceptTermsOfServiceRequest,
0x3fedc75f: functions.help.GetDeepLinkInfoRequest,
0x61e3f854: functions.help.GetAppConfigRequest,
0x6f02f748: functions.help.SaveAppLogRequest,
0xc661ad08: functions.help.GetPassportConfigRequest,
0xd360e72c: functions.help.GetSupportNameRequest,
0x038a08d3: functions.help.GetUserInfoRequest,
0x66b91b70: functions.help.EditUserInfoRequest,
0xc0977421: functions.help.GetPromoDataRequest,
0x1e251c95: functions.help.HidePromoDataRequest,
0xf50dbaa1: functions.help.DismissSuggestionRequest,
0x735787a8: functions.help.GetCountriesListRequest,
0xb81b93d4: functions.help.GetPremiumPromoRequest,
0xcc104937: functions.channels.ReadHistoryRequest,
0x84c1fd4e: functions.channels.DeleteMessagesRequest,
0xf44a8315: functions.channels.ReportSpamRequest,
0xad8c9a23: functions.channels.GetMessagesRequest,
0x77ced9d0: functions.channels.GetParticipantsRequest,
0xa0ab6cc6: functions.channels.GetParticipantRequest,
0x0a7f6bbb: functions.channels.GetChannelsRequest,
0x08736a09: functions.channels.GetFullChannelRequest,
0x91006707: functions.channels.CreateChannelRequest,
0xd33c8902: functions.channels.EditAdminRequest,
0x566decd0: functions.channels.EditTitleRequest,
0xf12e57c9: functions.channels.EditPhotoRequest,
0x10e6bd2c: functions.channels.CheckUsernameRequest,
0x3514b3de: functions.channels.UpdateUsernameRequest,
0x24b524c5: functions.channels.JoinChannelRequest,
0xf836aa95: functions.channels.LeaveChannelRequest,
0x199f3a6c: functions.channels.InviteToChannelRequest,
0xc0111fe3: functions.channels.DeleteChannelRequest,
0xe63fadeb: functions.channels.ExportMessageLinkRequest,
0x1f69b606: functions.channels.ToggleSignaturesRequest,
0xf8b036af: functions.channels.GetAdminedPublicChannelsRequest,
0x96e6cd81: functions.channels.EditBannedRequest,
0x33ddf480: functions.channels.GetAdminLogRequest,
0xea8ca4f9: functions.channels.SetStickersRequest,
0xeab5dc38: functions.channels.ReadMessageContentsRequest,
0x9baa9647: functions.channels.DeleteHistoryRequest,
0xeabbb94c: functions.channels.TogglePreHistoryHiddenRequest,
0x8341ecc0: functions.channels.GetLeftChannelsRequest,
0xf5dad378: functions.channels.GetGroupsForDiscussionRequest,
0x40582bb2: functions.channels.SetDiscussionGroupRequest,
0x8f38cd1f: functions.channels.EditCreatorRequest,
0x58e63f6d: functions.channels.EditLocationRequest,
0xedd49ef0: functions.channels.ToggleSlowModeRequest,
0x11e831ee: functions.channels.GetInactiveChannelsRequest,
0x0b290c69: functions.channels.ConvertToGigagroupRequest,
0xbeaedb94: functions.channels.ViewSponsoredMessageRequest,
0xec210fbf: functions.channels.GetSponsoredMessagesRequest,
0x0dc770ee: functions.channels.GetSendAsRequest,
0x367544db: functions.channels.DeleteParticipantHistoryRequest,
0xe4cb9580: functions.channels.ToggleJoinToSendRequest,
0x4c2985b6: functions.channels.ToggleJoinRequestRequest,
0xb45ced1d: functions.channels.ReorderUsernamesRequest,
0x50f24105: functions.channels.ToggleUsernameRequest,
0x0a245dd3: functions.channels.DeactivateAllUsernamesRequest,
0xa4298b29: functions.channels.ToggleForumRequest,
0xf40c0224: functions.channels.CreateForumTopicRequest,
0x0de560d1: functions.channels.GetForumTopicsRequest,
0xb0831eb9: functions.channels.GetForumTopicsByIDRequest,
0xf4dfa185: functions.channels.EditForumTopicRequest,
0x6c2d9026: functions.channels.UpdatePinnedForumTopicRequest,
0x34435f2d: functions.channels.DeleteTopicHistoryRequest,
0x2950a18f: functions.channels.ReorderPinnedForumTopicsRequest,
0x68f3e4eb: functions.channels.ToggleAntiSpamRequest,
0xa850a693: functions.channels.ReportAntiSpamFalsePositiveRequest,
0x6a6e7854: functions.channels.ToggleParticipantsHiddenRequest,
0xaa2769ed: functions.bots.SendCustomRequestRequest,
0xe6213f4d: functions.bots.AnswerWebhookJSONQueryRequest,
0x0517165a: functions.bots.SetBotCommandsRequest,
0x3d8de0f9: functions.bots.ResetBotCommandsRequest,
0xe34c0dd6: functions.bots.GetBotCommandsRequest,
0x4504d54f: functions.bots.SetBotMenuButtonRequest,
0x9c60eb28: functions.bots.GetBotMenuButtonRequest,
0x788464e1: functions.bots.SetBotBroadcastDefaultAdminRightsRequest,
0x925ec9ea: functions.bots.SetBotGroupDefaultAdminRightsRequest,
0x10cf3123: functions.bots.SetBotInfoRequest,
0xdcd914fd: functions.bots.GetBotInfoRequest,
0x9709b1c2: functions.bots.ReorderUsernamesRequest,
0x053ca973: functions.bots.ToggleUsernameRequest,
0x37148dbb: functions.payments.GetPaymentFormRequest,
0x2478d1cc: functions.payments.GetPaymentReceiptRequest,
0xb6c8f12b: functions.payments.ValidateRequestedInfoRequest,
0x2d03522f: functions.payments.SendPaymentFormRequest,
0x227d824b: functions.payments.GetSavedInfoRequest,
0xd83d70c1: functions.payments.ClearSavedInfoRequest,
0x2e79d779: functions.payments.GetBankCardDataRequest,
0x0f91b065: functions.payments.ExportInvoiceRequest,
0x80ed747d: functions.payments.AssignAppStoreTransactionRequest,
0xdffd50d3: functions.payments.AssignPlayMarketTransactionRequest,
0x9fc19eb6: functions.payments.CanPurchasePremiumRequest,
0x9021ab67: functions.stickers.CreateStickerSetRequest,
0xf7760f51: functions.stickers.RemoveStickerFromSetRequest,
0xffb6d4ca: functions.stickers.ChangeStickerPositionRequest,
0x8653febe: functions.stickers.AddStickerToSetRequest,
0xa76a5392: functions.stickers.SetStickerSetThumbRequest,
0x284b3639: functions.stickers.CheckShortNameRequest,
0x4dafc503: functions.stickers.SuggestShortNameRequest,
0xf5537ebc: functions.stickers.ChangeStickerRequest,
0x124b1c00: functions.stickers.RenameStickerSetRequest,
0x87704394: functions.stickers.DeleteStickerSetRequest,
0x55451fa9: functions.phone.GetCallConfigRequest,
0x42ff96ed: functions.phone.RequestCallRequest,
0x3bd2b4a0: functions.phone.AcceptCallRequest,
0x2efe1722: functions.phone.ConfirmCallRequest,
0x17d54f61: functions.phone.ReceivedCallRequest,
0xb2cbc1c0: functions.phone.DiscardCallRequest,
0x59ead627: functions.phone.SetCallRatingRequest,
0x277add7e: functions.phone.SaveCallDebugRequest,
0xff7a9383: functions.phone.SendSignalingDataRequest,
0x48cdc6d8: functions.phone.CreateGroupCallRequest,
0xb132ff7b: functions.phone.JoinGroupCallRequest,
0x500377f9: functions.phone.LeaveGroupCallRequest,
0x7b393160: functions.phone.InviteToGroupCallRequest,
0x7a777135: functions.phone.DiscardGroupCallRequest,
0x74bbb43d: functions.phone.ToggleGroupCallSettingsRequest,
0x041845db: functions.phone.GetGroupCallRequest,
0xc558d8ab: functions.phone.GetGroupParticipantsRequest,
0xb59cf977: functions.phone.CheckGroupCallRequest,
0xf128c708: functions.phone.ToggleGroupCallRecordRequest,
0xa5273abf: functions.phone.EditGroupCallParticipantRequest,
0x1ca6ac0a: functions.phone.EditGroupCallTitleRequest,
0xef7c213a: functions.phone.GetGroupCallJoinAsRequest,
0xe6aa647f: functions.phone.ExportGroupCallInviteRequest,
0x219c34e6: functions.phone.ToggleGroupCallStartSubscriptionRequest,
0x5680e342: functions.phone.StartScheduledGroupCallRequest,
0x575e1f8c: functions.phone.SaveDefaultGroupCallJoinAsRequest,
0xcbea6bc4: functions.phone.JoinGroupCallPresentationRequest,
0x1c50d144: functions.phone.LeaveGroupCallPresentationRequest,
0x1ab21940: functions.phone.GetGroupCallStreamChannelsRequest,
0xdeb3abbf: functions.phone.GetGroupCallStreamRtmpUrlRequest,
0x41248786: functions.phone.SaveCallLogRequest,
0xf2f2330a: functions.langpack.GetLangPackRequest,
0xefea3803: functions.langpack.GetStringsRequest,
0xcd984aa5: functions.langpack.GetDifferenceRequest,
0x42c6978f: functions.langpack.GetLanguagesRequest,
0x6a596502: functions.langpack.GetLanguageRequest,
0x6847d0ab: functions.folders.EditPeerFoldersRequest,
0xab42441a: functions.stats.GetBroadcastStatsRequest,
0x621d5fa0: functions.stats.LoadAsyncGraphRequest,
0xdcdf8607: functions.stats.GetMegagroupStatsRequest,
0x5630281b: functions.stats.GetMessagePublicForwardsRequest,
0xb6e0a3f5: functions.stats.GetMessageStatsRequest,
0x8472478e: functions.chatlists.ExportChatlistInviteRequest,
0x719c5c5e: functions.chatlists.DeleteExportedInviteRequest,
0x653db63d: functions.chatlists.EditExportedInviteRequest,
0xce03da83: functions.chatlists.GetExportedInvitesRequest,
0x41c10fff: functions.chatlists.CheckChatlistInviteRequest,
0xa6b1e39a: functions.chatlists.JoinChatlistInviteRequest,
0x89419521: functions.chatlists.GetChatlistUpdatesRequest,
0xe089f8f5: functions.chatlists.JoinChatlistUpdatesRequest,
0x66e486fb: functions.chatlists.HideChatlistUpdatesRequest,
0xfdbcd714: functions.chatlists.GetLeaveChatlistSuggestionsRequest,
0x74fae13a: functions.chatlists.LeaveChatlistRequest,
0x05162463: types.ResPQ,
0x83c95aec: types.PQInnerData,
0xa9f55f95: types.PQInnerDataDc,
0x3c6a84d4: types.PQInnerDataTemp,
0x56fddf88: types.PQInnerDataTempDc,
0x75a3f765: types.BindAuthKeyInner,
0x79cb045d: types.ServerDHParamsFail,
0xd0e8075c: types.ServerDHParamsOk,
0xb5890dba: types.ServerDHInnerData,
0x6643b654: types.ClientDHInnerData,
0x3bcbf734: types.DhGenOk,
0x46dc1fb9: types.DhGenRetry,
0xa69dae02: types.DhGenFail,
0xf660e1d4: types.DestroyAuthKeyOk,
0x0a9f2259: types.DestroyAuthKeyNone,
0xea109b13: types.DestroyAuthKeyFail,
0x60469778: functions.ReqPqRequest,
0xbe7e8ef1: functions.ReqPqMultiRequest,
0xd712e4be: functions.ReqDHParamsRequest,
0xf5045f1f: functions.SetClientDHParamsRequest,
0xd1435160: functions.DestroyAuthKeyRequest,
0x62d6b459: types.MsgsAck,
0xa7eff811: types.BadMsgNotification,
0xedab447b: types.BadServerSalt,
0xda69fb52: types.MsgsStateReq,
0x04deb57d: types.MsgsStateInfo,
0x8cc0d131: types.MsgsAllInfo,
0x276d3ec6: types.MsgDetailedInfo,
0x809db6df: types.MsgNewDetailedInfo,
0x7d861a08: types.MsgResendReq,
0x2144ca19: types.RpcError,
0x5e2ad36e: types.RpcAnswerUnknown,
0xcd78e586: types.RpcAnswerDroppedRunning,
0xa43ad8b7: types.RpcAnswerDropped,
0x0949d9dc: types.FutureSalt,
0xae500895: types.FutureSalts,
0x347773c5: types.Pong,
0xe22045fc: types.DestroySessionOk,
0x62d350c9: types.DestroySessionNone,
0x9ec20908: types.NewSessionCreated,
0x9299359f: types.HttpWait,
0xd433ad73: types.IpPort,
0x37982646: types.IpPortSecret,
0x4679b65f: types.AccessPointRule,
0x5a592a6c: types.help.ConfigSimple,
0x6c52c484: types.TlsClientHello,
0x4218a164: types.TlsBlockString,
0x4d4dc41e: types.TlsBlockRandom,
0x09333afb: types.TlsBlockZero,
0x10e8636f: types.TlsBlockDomain,
0xe675a1c1: types.TlsBlockGrease,
0x9eb95b5c: types.TlsBlockPublicKey,
0xe725d44f: types.TlsBlockScope,
0x58e4a740: functions.RpcDropAnswerRequest,
0xb921bd04: functions.GetFutureSaltsRequest,
0x7abe77ec: functions.PingRequest,
0xf3427b8c: functions.PingDelayDisconnectRequest,
0xe7512126: functions.DestroySessionRequest,
}
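# Minimal lookup sketch (assumption: this mapping is exposed as a dict named
# `tlobjects`, keyed by 32-bit TL constructor ID, as in Telethon's generated
# `alltlobjects` module; `reader` is a hypothetical BinaryReader positioned
# just past the constructor ID):
#
#     cls = tlobjects[0x347773c5]    # -> types.Pong
#     obj = cls.from_reader(reader)  # deserialize the received object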
/BIA_OBS-1.0.3.tar.gz/BIA_OBS-1.0.3/BIA/static/dist/node_modules/tailwindcss/lib/postcss-plugins/nesting/README.md

# tailwindcss/nesting
This is a PostCSS plugin that wraps [postcss-nested](https://github.com/postcss/postcss-nested) or [postcss-nesting](https://github.com/csstools/postcss-plugins/tree/main/plugins/postcss-nesting) and acts as a compatibility layer to make sure your nesting plugin of choice properly understands Tailwind's custom syntax like `@apply` and `@screen`.
Add it to your PostCSS configuration, somewhere before Tailwind itself:
```js
// postcss.config.js
module.exports = {
plugins: [
require('postcss-import'),
require('tailwindcss/nesting'),
require('tailwindcss'),
require('autoprefixer'),
]
}
```
By default, it uses the [postcss-nested](https://github.com/postcss/postcss-nested) plugin under the hood, which uses a Sass-like syntax and is the plugin that powers nesting support in the [Tailwind CSS plugin API](https://tailwindcss.com/docs/plugins#css-in-js-syntax).
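With the default `postcss-nested` backend you can combine Sass-style nesting with Tailwind directives. A minimal sketch (the selectors and utility classes here are invented for illustration):

```css
.card {
  @apply rounded-lg bg-white;

  .card-title {
    @apply text-lg font-bold;
  }

  &:hover {
    @apply shadow-md;
  }
}
```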
If you'd rather use [postcss-nesting](https://github.com/csstools/postcss-plugins/tree/main/plugins/postcss-nesting) (which is based on the work-in-progress [CSS Nesting](https://drafts.csswg.org/css-nesting-1/) specification), first install the plugin alongside:
```shell
npm install postcss-nesting
```
Then pass the plugin itself as an argument to `tailwindcss/nesting` in your PostCSS configuration:
```js
// postcss.config.js
module.exports = {
plugins: [
require('postcss-import'),
require('tailwindcss/nesting')(require('postcss-nesting')),
require('tailwindcss'),
require('autoprefixer'),
]
}
```
This can also be helpful if for whatever reason you need to use a very specific version of `postcss-nested` and want to override the version we bundle with `tailwindcss/nesting` itself.
/Bayesian2D-0.3.1.tar.gz/Bayesian2D-0.3.1/README.md

# Bayesian2D
This package implements Bayesian optimization in Python for any 2D function. It uses Gaussian process regression to create a surrogate function and the Maximum Probability of Improvement acquisition function to pick points to evaluate, thus finding the specified extremum of the function in only a few hundred evaluations.
# How to install
The package can simply be installed with 'pip install Bayesian2D'.
# How to use
The package contains two directories: tools and tests. The tools directory contains all the separate Python functions used by the algorithm, with the Bayesian2D function being the main function of the package.
To optimize your function, just use 'from Bayesian2D.tools import Bayesian2D'. The function takes as input the function you wish to optimize and the bounds in which you wish to search for the extremum (there are a few built-in named functions such as 'Beale' or 'Ackley', with the Rosenbrock function being the default, but custom functions can also be passed in). The function also requires you to specify the number of initial points evaluated, the number of optimization cycles run, the number of random points evaluated by the surrogate function each cycle, the exploration constant, and whether you want to find the maximum or minimum. A minimal usage sketch follows; the keyword names and return value are assumptions made for illustration, so check the function's signature in the tools directory for the exact interface:
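```python
from Bayesian2D.tools import Bayesian2D

# Hypothetical call: search for the minimum of the default Rosenbrock
# function within [-5, 5] bounds on both axes.
best_point = Bayesian2D(function='Rosenbrock', bounds=[-5, 5],
                        initial_points=50, cycles=200,
                        points_per_cycle=1000, exploration=0.3,
                        extremum='minimum')
```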
# Testing
Unit tests for all the functions used can be found in the aforementioned tests directory.
| PypiClean |
/OBP_reliability_pillar_4-0.0.14-py3-none-any.whl/OBP_reliability_pillar_4/elastic_beanstalk/enhanced_health_reporting_enabled.py | import botocore
import logging
from botocore.exceptions import ClientError
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger()
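# Usage sketch (hypothetical caller): this module-level function expects an object
# exposing a boto3 `session`, mirroring the other checks in this package.
#   report = enhanced_health_reporting_enabled(checker)
#   print(report['Result'], report['failReason'])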
def enhanced_health_reporting_enabled(self) -> dict:
"""
:param self:
:return:
"""
logger.info(" ---Inside elastic_beanstalk :: enhanced_health_reporting_enabled()")
regions = self.session.get_available_regions('elasticbeanstalk')
result = True
failReason = ''
offenders = []
control_id = 'Id3.13'
compliance_type = "Beanstalk Enhanced Health Reporting Enabled"
description = "Checks if an AWS Elastic Beanstalk environment is configured for enhanced health reporting. The rule is COMPLIANT if the environment is configured for enhanced health reporting."
resource_type = "Elastic Beanstalk"
risk_level = 'Medium'
for region in regions:
try:
client = self.session.client('elasticbeanstalk', region_name=region)
marker = ''
while True:
if marker == '' or marker is None:
response_describe_eb = client.describe_environments()
else:
response_describe_eb = client.describe_environments(
NextToken=marker
)
for env in response_describe_eb['Environments']:
                    # 'HealthStatus' is only populated when enhanced health reporting
                    # is active, so treat a missing key the same as an empty value.
                    if len(env.get('HealthStatus', '')) == 0:
result = False
failReason = 'AWS Elastic Beanstalk environment is not configured for enhanced health reporting.'
offenders.append(env['EnvironmentId'])
try:
marker = response_describe_eb['NextToken']
if marker == '':
break
except KeyError:
break
except ClientError as e:
logger.error("Something went wrong with region {}: {}".format(region, e))
return {
'Result': result,
'failReason': failReason,
'resource_type': resource_type,
'ControlId': control_id,
'Offenders': offenders,
'Compliance_type': compliance_type,
'Description': description,
'Risk Level': risk_level
} | PypiClean |
/MaterialDjango-0.2.5.tar.gz/MaterialDjango-0.2.5/materialdjango/static/materialdjango/components/bower_components/paper-toolbar/.github/ISSUE_TEMPLATE.md | <!-- Instructions: https://github.com/PolymerElements/paper-toolbar/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| PypiClean |
/Automancy-0.5.12.tar.gz/Automancy-0.5.12/automancy/elementals/molecules/slider.py |
from time import sleep
from automancy.core import AutomancyChain, Elemental
class Slider(Elemental):
""" Handler for slider objects (E.G., video volume, video progress bar) """
def __init__(self, locator: str, human_name: str, system_name: str, orientation: str, boundary_buffer: int = 0, position_attribute: str = ''):
"""
Args:
locator (str): xpath string for the lookup
human_name (str): human-readable name
system_name (str): system-readable name
orientation (str): Either 'vertical' or 'horizontal'
boundary_buffer (int): Optional, The total amount of padding around the slider components subtracted from the element width
position_attribute (str): Optional, A defined value that can be used as a lookup reference to determine the current slider location value
"""
super().__init__(locator, human_name, system_name)
self.boundary_buffer = boundary_buffer / 2 if boundary_buffer else 0
self.orientation = orientation
self.position_attribute = position_attribute
@property
def width(self):
"""
Returns:
"""
return self.element().size['width']
@width.setter
def width(self, value):
self._width = value
@property
def height(self):
"""
Returns:
"""
return self.element().size['height']
@height.setter
def height(self, value):
self._height = value
def change_value(self, desired_percent):
"""
Uses clicks to directly change the slider current position to a new location
(as opposed to clicking and dragging)
Args:
desired_percent (int): A percent value
Returns:
None
"""
        # Raise an error if the desired percent is not an integer
        if not isinstance(desired_percent, int):
            raise ValueError("desired_percent must be an integer")
# Ensure that the desired percent is within 0 -> 100 boundaries
desired_percent = 0 if desired_percent < 0 else desired_percent
desired_percent = 100 if desired_percent > 100 else desired_percent
target_offset_x, target_offset_y = self.define_offsets(desired_percent)
        if desired_percent > 50 and target_offset_x < 0:
            # The original statement multiplied by 1, which is a no-op; flipping the
            # sign appears to be the intent so moves past the midpoint go rightwards.
            target_offset_x *= -1
# Create the action chain
actions = AutomancyChain(self.browser)
actions.move_to_element(self.element())
actions.move_by_offset(target_offset_x, target_offset_y)
actions.click()
# TODO -> Run tests that use this method (with sleep commented out), remove if possible
# Arbitrary (maybe?) explicit sleep so unknown style transitions have a chance to draw.
sleep(0.5)
# Finally, perform the action chain
actions.perform()
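    # Usage sketch (hypothetical locator; assumes a driver is attached elsewhere):
    #   slider = Slider('//div[@id="volume"]', 'Volume', 'volume', orientation='horizontal')
    #   slider.change_value(75)  # jump the handle to 75%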
def drag_to_value(self, desired_percent):
"""
Not implemented
Intended Design:
1. Find the position of the current value position
2. Find where the current position is in relationship to the total element length (in percent)
3. Use the current position as the center from where the offset will start
4. Determine the distance from the starting point to the new desired value (in percent)
5. Use the action chain method "drag_and_drop" or "drag_and_drop_by_offset"
Returns:
None
"""
raise NotImplementedError
def define_offsets(self, desired_percent):
"""
Defines the target offsets that are used to move the slider position to
a target location.
Determine proper offset values depending on if the slider is horizontal or vertical
Returns:
(tuple): The X and Y coordinates for the target coordinates
"""
target_offset_x = self.get_target_offset(desired_percent) if self.orientation == 'horizontal' else 0
target_offset_y = self.get_target_offset(desired_percent) if self.orientation == 'vertical' else 0
return target_offset_x, target_offset_y
def get_target_offset(self, desired_percent):
"""
Acquires the offset value needed to move the slider to a new position
by clicking or clicking and dragging.
Returns:
(int): The offset to move the slider position to.
"""
# TODO -> Need to include sliders that are vertical
# Store the width so we're not doing the lookup twice
width = self.width
# Determine the percentage slice size and the location of the middle of the slider element
slider_percent = width / 100
slider_middle = width / 2
# Find the absolute target percentage that we're aiming for
target_pixel = int(round(desired_percent * slider_percent))
# Determine the offset from the middle of the element to target
target_offset = max(slider_middle, target_pixel) - min(slider_middle, target_pixel)
# Change the target offset to a negative number if it's less than half of the size of the element width
if target_pixel < slider_middle:
target_offset = target_offset * -1
# Define the modifier value to compensate for element margins/padding/etc
boundary_modifier = round(self.boundary_buffer / 2)
# GeckoDriver is weird with how it calculates element center points when an element has margins/padding/etc.
# Dividing the boundary modifier in half solves this. (Haven't found an answer to why yet)
if self.browser_used == 'firefox':
boundary_modifier /= 2
# Adjust the final target offset value by the boundary buffer amount.
if desired_percent >= 50:
target_offset += boundary_modifier
else:
target_offset -= boundary_modifier
return target_offset
def current_position(self):
"""
Inspects the element for a determinable current slider "notch" position.
This method uses self.position_attribute which could be something like
the "aria-valuenow" attribute on an element.
Returns:
(int) The value of the slider current position indicator
"""
if self.position_attribute:
position_value = self.element().get_attribute(self.position_attribute)
# Prevent situations where the query happens too fast
while self.element().get_attribute(self.position_attribute) == 'NaN':
position_value = self.element().get_attribute(self.position_attribute)
return int(float(position_value))
else:
return None | PypiClean |
/FStore-1.0.2-py3-none-any.whl/fstore/binary_search_tree.py | from collections import deque
class Node:
"""tree element"""
def __init__(self, key, val):
self.key = key
self.val = val
self.left = None
self.right = None
def search(root, key):
"""search tree by key"""
if not root:
return None
if key < root.key:
return search(root.left, key)
if key > root.key:
return search(root.right, key)
return root.val
def insert(root, key, val):
"""insert a node"""
# Return a new node if the tree is empty
if not root:
return Node(key, val)
# Traverse to the right place and insert the node
if key < root.key:
root.left = insert(root.left, key, val)
else:
root.right = insert(root.right, key, val)
return root
def inorder_successor(root):
"""find inorder successor"""
current = root
while current.left:
current = current.left
return current
def delete(root, key):
"""delete node"""
if not root:
# Key is not in tree
return root
# Find the node to be deleted
if key < root.key:
root.left = delete(root.left, key)
elif key > root.key:
root.right = delete(root.right, key)
else:
# If the node is with only one child or no child
if root.left is None:
temp = root.right
root = None
return temp
if root.right is None:
temp = root.left
root = None
return temp
        # If the node has two children, copy the inorder successor's key AND value
        # into this node, then delete the successor from the right subtree
        temp = inorder_successor(root.right)
        root.key = temp.key
        root.val = temp.val
# Delete the inorder successor
root.right = delete(root.right, temp.key)
return root
def serialize(root):
"""serialize tree to string"""
if not root:
return ""
result = ""
q = deque()
q.append(root)
while len(q) > 0:
cur = q.popleft()
if cur:
result += str(cur.key) + ":" + str(cur.val)
if cur.left:
q.append(cur.left)
else:
q.append(None)
if cur.right:
q.append(cur.right)
else:
q.append(None)
else:
result += "NULL:NULL"
result += ","
return result
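# Round-trip sketch: deserialize() re-inserts keys (and values) as strings, so
# numeric keys come back lexicographically ordered (a known limitation here).
#   root = insert(None, "m", 1)
#   root = insert(root, "c", 2)
#   clone = deserialize(serialize(root))
#   search(clone, "c")  # -> "2"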
def deserialize(string):
"""deserialize tree from string"""
# Parse string into array of strings
tmp = string.split(",")
data = []
for i in range(len(tmp)-1):
# Separate strings into key-value pairs
data.append(tmp[i].split(":"))
return make(data)
def make(elements):
"""make tree from array of key-value pairs"""
root = None
for element in elements:
key = element[0]
val = element[1]
# Skip over null nodes
if key != "NULL":
root = insert(root, key, val)
return root | PypiClean |
/Geosis-0.0.1.tar.gz/Geosis-0.0.1/README.md | <h1 align="center">
<br>
<a href="https://github.com/mohamed-hachaichi/Geosis"><img src="https://github.com/mohamed-hachaichi/app/blob/main/Geosis.png" alt="" width="200"></a>
<br>
Geosis
<br>
</h1>
<h4 align="center">A powerful geographic analysis tool on top of <a href="https://www.python.org/" target="_blank">Python</a>.</h4>
<div id="top"></div>
<!-- PROJECT LOGO -->
<br />
<div align="center">
<a href="https://github.com/othneildrew/Best-README-Template">
<img src="images/logo.png" alt="Logo" width="160" height="160">
</a>
<h3 align="center">Geosis</h3>
<p align="center">
A machine learning-based package for spatial analysis
<br />
<a href="https://github.com/mohamed-hachaichi/Geosis"><strong>Explore the docs »</strong></a>
</p>
</div>
<!-- TABLE OF CONTENTS -->
<details>
<summary>Table of Contents</summary>
<ol>
<li>
<a href="#about-the-project">About The Project</a>
<ul>
<li><a href="#built-with">Built With</a></li>
</ul>
</li>
<li>
<a href="#getting-started">Getting Started</a>
<ul>
<li><a href="#prerequisites">Prerequisites</a></li>
<li><a href="#installation">Installation</a></li>
</ul>
</li>
<li><a href="#usage">Usage</a></li>
<li><a href="#roadmap">Roadmap</a></li>
<li><a href="#contributing">Contributing</a></li>
<li><a href="#license">License</a></li>
<li><a href="#contact">Contact</a></li>
<li><a href="#acknowledgments">Acknowledgments</a></li>
</ol>
</details>
<!-- ABOUT THE PROJECT -->
## About The Project
The geography of knowledge production describes how local scientific outputs are accepted, produced and debated elsewhere. Analyzing such geographic data means looking for trends, networks, evolutions and relationships. Yet, while open-source and free software has started attracting academics from various disciplines, several gaps persist: (i) synthesizing bottom-up knowledge production, (ii) inspecting the genealogy of a given field, (iii) displaying the spatial distribution of the field across territories, and (iv) unpacking the spatial community network structure. Geosis is an artificial intelligence-based package developed to address such questions quickly and easily using large-scale textual data. The input data passes through three main preprocessing stages. The first is a geoparsing module, where the textual data becomes geo-referenced. The second is a natural language processing (NLP) module, where the data is synthesized and major themes are extracted. The third is a network analysis module, where the research community of the field is mapped and the major producers in the field are unveiled. To our knowledge, this is the first package that can unpack all of these aspects of "knowledge production" for any given field.
<p align="right">(<a href="#top">back to top</a>)</p>
### Built With
Geosis is built with the following major frameworks and libraries:
* [Pandas](https://pandas.pydata.org)
* [GeoPandas](https://geopandas.org/en/stable/)
* [NetworkX](https://networkx.org)
* [Dask](https://www.dask.org)
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- GETTING STARTED -->
## Getting Started
To start using Geosis, you need to install pandas, geopandas, dask, networkx, seaborn, and scikit-learn.
### Prerequisites
To install the prerequisites, please insert the following command in your prompt:
* bash
```sh
pip install pandas geopandas dask networkx seaborn scikit-learn
```
### Installation
_Below are the steps to install Geosis from PyPI._
1. Open your terminal.
2. write the following code:
```sh
pip install geosis
```
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- USAGE EXAMPLES -->
## Usage
For a brief introduction on how to use the package, please read the article at: -waiting to be published-.
_For more examples, please refer to the [Documentation](https://example.com)_
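A hypothetical first session might look like the sketch below; none of these function names are the documented API, they simply mirror the three modules described above:

```python
import geosis  # assumed import name; the distribution on PyPI is "Geosis"

records = geosis.read("scopus_export.csv")       # hypothetical: load a Scopus/WoS export
geo = geosis.geoparse(records)                   # hypothetical: geo-reference the texts
themes = geosis.extract_themes(geo)              # hypothetical: NLP synthesis of major themes
network = geosis.community_network(geo)          # hypothetical: map the research community
```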
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- ROADMAP -->
## Roadmap
- [x] Collect data from Scopus or Web of Science (WoS)
- [x] Read the data using Geosis' local functions
- [ ] Add Additional Templates w/ Examples
- [ ] Add "components" document to easily copy & paste sections of the readme
- [x] Multi-language Support
- [x] English
- [ ] French
- [ ] Chinese
- [ ] Spanish
See the [open issues](https://github.com/mohamed-hachaichi/Geosis/issues) for a full list of proposed features (and known issues).
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- CONTRIBUTING -->
## Contributing
Contributions are what make the open source community such an amazing place to learn, inspire, and create. Any contributions you make are **greatly appreciated**.
If you have a suggestion that would make this better, please fork the repo and create a pull request. You can also simply open an issue with the tag "enhancement".
Don't forget to give the project a star! Thanks again!
1. Fork the Project
2. Create your Feature Branch (`git checkout -b feature/AmazingFeature`)
3. Commit your Changes (`git commit -m 'Add some AmazingFeature'`)
4. Push to the Branch (`git push origin feature/AmazingFeature`)
5. Open a Pull Request
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- LICENSE -->
## License
Distributed under the GNU GENERAL PUBLIC LICENSE. See `LICENSE.txt` for more information.
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- CONTACT -->
## Contact
Mohamed Hachaichi - [@datum_geek](https://twitter.com/datum_geek) - [email protected]
Project Link: [Geosis](https://github.com/mohamed-hachaichi/Geosis)
<p align="right">(<a href="#top">back to top</a>)</p>
<!-- ACKNOWLEDGMENTS -->
## Acknowledgments
The Python package is accessible from PyPI following the link: . Note that the package will be maintained, and new releases will be available in the future, expanding its geographical analysis scope and providing many more capabilities and mapping options. Geosis is built on: Pandas, GeoPandas, and NetworkX.
<p align="right">(<a href="#top">back to top</a>)</p> | PypiClean |
/Editra-0.7.20.tar.gz/Editra-0.7.20/src/syntax/_xtext.py | __author__ = "Igor Dejanovic <[email protected]>"
__svnid__ = "$Id: _xtext.py 70229 2012-01-01 01:27:10Z CJP $"
__revision__ = "$Revision: 70229 $"
#-----------------------------------------------------------------------------#
# Imports
import wx.stc as stc
from pygments.lexer import RegexLexer, include, bygroups
from pygments.token import Token, Text, Comment, Operator, \
Keyword, Name, String, Number, Punctuation
import re
# Local Imports
import synglob
import syndata
#-----------------------------------------------------------------------------#
# Style Id's
STC_XTEXT_DEFAULT, \
STC_XTEXT_COMMENT, \
STC_XTEXT_NUMBER, \
STC_XTEXT_STRING, \
STC_XTEXT_STRINGEOL, \
STC_XTEXT_OPERATOR, \
STC_XTEXT_NAME, \
STC_XTEXT_ABSTRACTRULE, \
STC_XTEXT_FEATURE, \
STC_XTEXT_CROSSREF, \
STC_XTEXT_PACKAGE, \
STC_XTEXT_KEYWORD, \
STC_XTEXT_KEYWORD_PSEUDO = range(13)
#-----------------------------------------------------------------------------#
#---- Keyword Specifications ----#
# Xtext Keywords
KEYWORDS = ("grammar generate import returns enum terminal hidden with as current")
TERMINALS = ("ID INT STRING")
#---- Syntax Style Specs ----#
SYNTAX_ITEMS = [ (STC_XTEXT_DEFAULT, 'default_style'),
(STC_XTEXT_COMMENT, 'comment_style'),
(STC_XTEXT_NUMBER, 'number_style'),
(STC_XTEXT_STRING, 'string_style'),
(STC_XTEXT_STRINGEOL, 'stringeol_style'),
(STC_XTEXT_OPERATOR, 'operator_style'),
(STC_XTEXT_NAME, 'default_style'),
(STC_XTEXT_ABSTRACTRULE, 'keyword3_style'),
(STC_XTEXT_FEATURE, 'default_style'),
(STC_XTEXT_CROSSREF, 'class_style'),
(STC_XTEXT_PACKAGE, 'class_style'),
(STC_XTEXT_KEYWORD, 'keyword_style'),
(STC_XTEXT_KEYWORD_PSEUDO, 'keyword2_style'), ]
#-------- Xtext grammar rules ---------------
#---- Extra Properties ----#
#-----------------------------------------------------------------------------#
class SyntaxData(syndata.SyntaxDataBase):
"""SyntaxData object for XText"""
def __init__(self, langid):
super(SyntaxData, self).__init__(langid)
# Setup
self.SetLexer(stc.STC_LEX_CONTAINER)
self.RegisterFeature(synglob.FEATURE_AUTOINDENT, AutoIndenter)
self.RegisterFeature(synglob.FEATURE_STYLETEXT, StyleText)
def GetKeywords(self):
"""Returns Specified Keywords List """
return [(1, KEYWORDS)]
def GetSyntaxSpec(self):
"""Syntax Specifications """
return SYNTAX_ITEMS
def GetCommentPattern(self):
"""Returns a list of characters used to comment a block of code """
return [u"//"]
#-----------------------------------------------------------------------------#
# Features
def StyleText(stc, start, end):
"""Style the text
@param stc: Styled text control instance
@param start: Start position
@param end: end position
"""
for index, token, txt in lexer.get_tokens_unprocessed(stc.GetTextRange(0, end)):
# print index, token, txt
style = TOKEN_MAP.get(token, STC_XTEXT_DEFAULT)
# print "Text=%s, len=%s" % (txt, len(txt))
stc.StartStyling(index, 0x1f)
tlen = len(txt)
if tlen:
stc.SetStyling(len(txt), style)
def AutoIndenter(estc, pos, ichar):
"""Auto indent xtext code.
This code is based on python AutoIndenter.
@param estc: EditraStyledTextCtrl
@param pos: current carat position
@param ichar: Indentation character
@return: string
"""
rtxt = u''
line = estc.GetCurrentLine()
spos = estc.PositionFromLine(line)
text = estc.GetTextRange(spos, pos)
eolch = estc.GetEOLChar()
inspace = text.isspace()
# Cursor is in the indent area somewhere or in the column 0.
if inspace or not len(text):
estc.AddText(eolch + text)
return
text = text.strip()
if text.endswith(";"):
estc.AddText(eolch)
return
indent = estc.GetLineIndentation(line)
if ichar == u"\t":
tabw = estc.GetTabWidth()
else:
tabw = estc.GetIndent()
i_space = indent / tabw
end_spaces = ((indent - (tabw * i_space)) * u" ")
if text.endswith(u":"):
i_space += 1
rtxt = eolch + ichar * i_space + end_spaces
# Put text in the buffer
estc.AddText(rtxt)
#-----------------------------------------------------------------------------#
TOKEN_MAP = { Token.String : STC_XTEXT_STRING,
Token.Comment.Multiline : STC_XTEXT_COMMENT,
Token.Comment.Single : STC_XTEXT_COMMENT,
Token.Operator : STC_XTEXT_OPERATOR,
Token.Punctuation : STC_XTEXT_OPERATOR,
Token.Number.Integer : STC_XTEXT_NUMBER,
Token.Keyword : STC_XTEXT_KEYWORD,
Token.Keyword.Pseudo: STC_XTEXT_KEYWORD_PSEUDO,
Token.Name : STC_XTEXT_NAME,
Token.Name.AbstractRule : STC_XTEXT_ABSTRACTRULE,
Token.Name.Feature : STC_XTEXT_FEATURE,
Token.Name.CrossRef : STC_XTEXT_CROSSREF,
Token.Name.Package : STC_XTEXT_PACKAGE,
Token.Name.Package.EMF : STC_XTEXT_PACKAGE}
class XTextLexer(RegexLexer):
"""
Xtext lexer based on statefull RegexLexer from pygments library.
"""
name = 'Xtext'
aliases = ['xtext']
filenames = ['*.xtxt']
mimetypes = ['text/x-xtext']
flags = re.MULTILINE | re.DOTALL # | re.UNICODE
#: optional Comment or Whitespace
#_ws = r'(?:\s|//.*?\n|/[*].*?[*]/)+'
def AltWords(words):
"""Makes lexer rule for alternative words from the given words list.
@param words: string consisting of space separated words
        @return: string in the form \\bword1\\b|\\bword2\\b|\\bword3\\b...
"""
return "|".join([ "\\b%s\\b" % w for w in words.split()])
_ident = r'\^?[a-zA-Z_\$][a-zA-Z0-9_]*'
tokens = {
'root': [
(include('first')),
(_ident + r'(\.' + _ident + r')+', Name.Package),
('(' + _ident + r')(\s*)(returns)',
bygroups(Name.AbstractRule, Text.Whitespace, Keyword), 'parserrule'),
('(' + _ident + r')(\s*)(:)',
bygroups(Name.AbstractRule, Text.Whitespace, Punctuation), 'parserrule'),
(_ident, Name),
],
'first': [
(r'/\*', Comment.Multiline, 'comment'),
(r'\n', Token.EndOfLine),
(r'//[^\n]*$', Comment.Single),
(r'[ \t]+', Text.Whitespace),
(r'"(\\\\|\\"|[^"])*"', String),
(r"'(\\\\|\\'|[^'])*'", String),
(r'\*|\?|\+|!|\||=|\?=|\+=|\.\.|->', Operator),
(r'[()\[\]{}:]', Punctuation),
(r'[0-9]+', Number.Integer),
(AltWords(KEYWORDS), Keyword),
(AltWords(TERMINALS), Keyword.Pseudo),
(_ident + r'(::' + _ident + r')+', Name.Package.EMF),
],
'parserrule': [
(include('first')),
('(' + _ident + r'(\.' + _ident + r')?)([ \t]*)(=|\?=|\+=)',
bygroups(Name.Feature, Text.Whitespace, Operator)),
(_ident + r'(\.' + _ident + r')+', Name.Package),
(_ident, Name.CrossRef),
(r';', Punctuation, "#pop"),
],
'comment': [
# Nested and multiline comments
(r'/\*', Comment.Multiline, "#push"),
(r'\*/', Comment.Multiline, "#pop"),
(r'\n', Token.EndOfLine),
(r'[^/*\n]+', Comment.Multiline),
(r'\*|\/', Comment.Multiline),
],
}
lexer = XTextLexer()
if __name__=='__main__':
import codecs, sys
ftext = codecs.open(sys.argv[1], "r", "utf-8")
text = ftext.read()
ftext.close()
line=1
for index, token, txt in lexer.get_tokens_unprocessed(text):
if token is Token.EndOfLine:
line += 1
print line, token, txt | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/@types/node/ts4.8/vm.d.ts | declare module 'vm' {
interface Context extends NodeJS.Dict<any> {}
interface BaseOptions {
/**
* Specifies the filename used in stack traces produced by this script.
* Default: `''`.
*/
filename?: string | undefined;
/**
* Specifies the line number offset that is displayed in stack traces produced by this script.
* Default: `0`.
*/
lineOffset?: number | undefined;
/**
* Specifies the column number offset that is displayed in stack traces produced by this script.
* @default 0
*/
columnOffset?: number | undefined;
}
interface ScriptOptions extends BaseOptions {
/**
* V8's code cache data for the supplied source.
*/
cachedData?: Buffer | NodeJS.ArrayBufferView | undefined;
/** @deprecated in favor of `script.createCachedData()` */
produceCachedData?: boolean | undefined;
/**
* Called during evaluation of this module when `import()` is called.
* If this option is not specified, calls to `import()` will reject with `ERR_VM_DYNAMIC_IMPORT_CALLBACK_MISSING`.
*/
importModuleDynamically?: ((specifier: string, script: Script, importAssertions: Object) => Module) | undefined;
}
interface RunningScriptOptions extends BaseOptions {
/**
* When `true`, if an `Error` occurs while compiling the `code`, the line of code causing the error is attached to the stack trace.
* Default: `true`.
*/
displayErrors?: boolean | undefined;
/**
* Specifies the number of milliseconds to execute code before terminating execution.
* If execution is terminated, an `Error` will be thrown. This value must be a strictly positive integer.
*/
timeout?: number | undefined;
/**
* If `true`, the execution will be terminated when `SIGINT` (Ctrl+C) is received.
* Existing handlers for the event that have been attached via `process.on('SIGINT')` will be disabled during script execution, but will continue to work after that.
* If execution is terminated, an `Error` will be thrown.
* Default: `false`.
*/
breakOnSigint?: boolean | undefined;
}
interface RunningScriptInNewContextOptions extends RunningScriptOptions {
/**
* Human-readable name of the newly created context.
*/
contextName?: CreateContextOptions['name'];
/**
* Origin corresponding to the newly created context for display purposes. The origin should be formatted like a URL,
* but with only the scheme, host, and port (if necessary), like the value of the `url.origin` property of a `URL` object.
* Most notably, this string should omit the trailing slash, as that denotes a path.
*/
contextOrigin?: CreateContextOptions['origin'];
contextCodeGeneration?: CreateContextOptions['codeGeneration'];
/**
* If set to `afterEvaluate`, microtasks will be run immediately after the script has run.
*/
microtaskMode?: CreateContextOptions['microtaskMode'];
}
interface RunningCodeOptions extends RunningScriptOptions {
cachedData?: ScriptOptions['cachedData'];
importModuleDynamically?: ScriptOptions['importModuleDynamically'];
}
interface RunningCodeInNewContextOptions extends RunningScriptInNewContextOptions {
cachedData?: ScriptOptions['cachedData'];
importModuleDynamically?: ScriptOptions['importModuleDynamically'];
}
interface CompileFunctionOptions extends BaseOptions {
/**
* Provides an optional data with V8's code cache data for the supplied source.
*/
cachedData?: Buffer | undefined;
/**
* Specifies whether to produce new cache data.
* Default: `false`,
*/
produceCachedData?: boolean | undefined;
/**
* The sandbox/context in which the said function should be compiled in.
*/
parsingContext?: Context | undefined;
/**
* An array containing a collection of context extensions (objects wrapping the current scope) to be applied while compiling
*/
contextExtensions?: Object[] | undefined;
}
interface CreateContextOptions {
/**
* Human-readable name of the newly created context.
* @default 'VM Context i' Where i is an ascending numerical index of the created context.
*/
name?: string | undefined;
/**
* Corresponds to the newly created context for display purposes.
* The origin should be formatted like a `URL`, but with only the scheme, host, and port (if necessary),
* like the value of the `url.origin` property of a URL object.
* Most notably, this string should omit the trailing slash, as that denotes a path.
* @default ''
*/
origin?: string | undefined;
codeGeneration?:
| {
/**
* If set to false any calls to eval or function constructors (Function, GeneratorFunction, etc)
* will throw an EvalError.
* @default true
*/
strings?: boolean | undefined;
/**
* If set to false any attempt to compile a WebAssembly module will throw a WebAssembly.CompileError.
* @default true
*/
wasm?: boolean | undefined;
}
| undefined;
/**
* If set to `afterEvaluate`, microtasks will be run immediately after the script has run.
*/
microtaskMode?: 'afterEvaluate' | undefined;
}
type MeasureMemoryMode = 'summary' | 'detailed';
interface MeasureMemoryOptions {
/**
* @default 'summary'
*/
mode?: MeasureMemoryMode | undefined;
/**
* @default 'default'
*/
execution?: 'default' | 'eager' | undefined;
}
interface MemoryMeasurement {
total: {
jsMemoryEstimate: number;
jsMemoryRange: [number, number];
};
}
/**
* Instances of the `vm.Script` class contain precompiled scripts that can be
* executed in specific contexts.
* @since v0.3.1
*/
class Script {
constructor(code: string, options?: ScriptOptions | string);
/**
         * Runs the compiled code contained by the `vm.Script` object within the given `contextifiedObject` and returns the result. Running code does not have access
* to local scope.
*
* The following example compiles code that increments a global variable, sets
* the value of another global variable, then execute the code multiple times.
* The globals are contained in the `context` object.
*
* ```js
* const vm = require('vm');
*
* const context = {
* animal: 'cat',
* count: 2
* };
*
* const script = new vm.Script('count += 1; name = "kitty";');
*
* vm.createContext(context);
* for (let i = 0; i < 10; ++i) {
* script.runInContext(context);
* }
*
* console.log(context);
* // Prints: { animal: 'cat', count: 12, name: 'kitty' }
* ```
*
* Using the `timeout` or `breakOnSigint` options will result in new event loops
* and corresponding threads being started, which have a non-zero performance
* overhead.
* @since v0.3.1
* @param contextifiedObject A `contextified` object as returned by the `vm.createContext()` method.
* @return the result of the very last statement executed in the script.
*/
runInContext(contextifiedObject: Context, options?: RunningScriptOptions): any;
/**
* First contextifies the given `contextObject`, runs the compiled code contained
* by the `vm.Script` object within the created context, and returns the result.
* Running code does not have access to local scope.
*
* The following example compiles code that sets a global variable, then executes
* the code multiple times in different contexts. The globals are set on and
* contained within each individual `context`.
*
* ```js
* const vm = require('vm');
*
* const script = new vm.Script('globalVar = "set"');
*
* const contexts = [{}, {}, {}];
* contexts.forEach((context) => {
* script.runInNewContext(context);
* });
*
* console.log(contexts);
* // Prints: [{ globalVar: 'set' }, { globalVar: 'set' }, { globalVar: 'set' }]
* ```
* @since v0.3.1
* @param contextObject An object that will be `contextified`. If `undefined`, a new object will be created.
* @return the result of the very last statement executed in the script.
*/
runInNewContext(contextObject?: Context, options?: RunningScriptInNewContextOptions): any;
/**
* Runs the compiled code contained by the `vm.Script` within the context of the
* current `global` object. Running code does not have access to local scope, but _does_ have access to the current `global` object.
*
* The following example compiles code that increments a `global` variable then
* executes that code multiple times:
*
* ```js
* const vm = require('vm');
*
* global.globalVar = 0;
*
* const script = new vm.Script('globalVar += 1', { filename: 'myfile.vm' });
*
* for (let i = 0; i < 1000; ++i) {
* script.runInThisContext();
* }
*
* console.log(globalVar);
*
* // 1000
* ```
* @since v0.3.1
* @return the result of the very last statement executed in the script.
*/
runInThisContext(options?: RunningScriptOptions): any;
/**
         * Creates a code cache that can be used with the `Script` constructor's `cachedData` option. Returns a `Buffer`. This method may be called at any
* time and any number of times.
*
* ```js
* const script = new vm.Script(`
* function add(a, b) {
* return a + b;
* }
*
* const x = add(1, 2);
* `);
*
* const cacheWithoutX = script.createCachedData();
*
* script.runInThisContext();
*
* const cacheWithX = script.createCachedData();
* ```
* @since v10.6.0
*/
createCachedData(): Buffer;
/** @deprecated in favor of `script.createCachedData()` */
cachedDataProduced?: boolean | undefined;
cachedDataRejected?: boolean | undefined;
cachedData?: Buffer | undefined;
/**
* When the script is compiled from a source that contains a source map magic comment, this property will be set to the URL of the source map.
*/
sourceMapURL?: string | undefined;
}
/**
     * If given a `contextObject`, the `vm.createContext()` method will prepare
     * that object so that it can be used in calls to {@link runInContext} or `script.runInContext()`. Inside such scripts,
* the `contextObject` will be the global object, retaining all of its existing
* properties but also having the built-in objects and functions any standard [global object](https://es5.github.io/#x15.1) has. Outside of scripts run by the vm module, global variables
* will remain unchanged.
*
* ```js
* const vm = require('vm');
*
* global.globalVar = 3;
*
* const context = { globalVar: 1 };
* vm.createContext(context);
*
* vm.runInContext('globalVar *= 2;', context);
*
* console.log(context);
* // Prints: { globalVar: 2 }
*
* console.log(global.globalVar);
* // Prints: 3
* ```
*
* If `contextObject` is omitted (or passed explicitly as `undefined`), a new,
* empty `contextified` object will be returned.
*
* The `vm.createContext()` method is primarily useful for creating a single
* context that can be used to run multiple scripts. For instance, if emulating a
* web browser, the method can be used to create a single context representing a
* window's global object, then run all `<script>` tags together within that
* context.
*
* The provided `name` and `origin` of the context are made visible through the
* Inspector API.
* @since v0.3.1
* @return contextified object.
*/
function createContext(sandbox?: Context, options?: CreateContextOptions): Context;
/**
* Returns `true` if the given `object` object has been `contextified` using {@link createContext}.
* @since v0.11.7
*/
function isContext(sandbox: Context): boolean;
/**
* The `vm.runInContext()` method compiles `code`, runs it within the context of
* the `contextifiedObject`, then returns the result. Running code does not have
* access to the local scope. The `contextifiedObject` object _must_ have been
* previously `contextified` using the {@link createContext} method.
*
* If `options` is a string, then it specifies the filename.
*
* The following example compiles and executes different scripts using a single `contextified` object:
*
* ```js
* const vm = require('vm');
*
* const contextObject = { globalVar: 1 };
* vm.createContext(contextObject);
*
* for (let i = 0; i < 10; ++i) {
* vm.runInContext('globalVar *= 2;', contextObject);
* }
* console.log(contextObject);
* // Prints: { globalVar: 1024 }
* ```
* @since v0.3.1
* @param code The JavaScript code to compile and run.
* @param contextifiedObject The `contextified` object that will be used as the `global` when the `code` is compiled and run.
* @return the result of the very last statement executed in the script.
*/
function runInContext(code: string, contextifiedObject: Context, options?: RunningCodeOptions | string): any;
/**
* The `vm.runInNewContext()` first contextifies the given `contextObject` (or
* creates a new `contextObject` if passed as `undefined`), compiles the `code`,
* runs it within the created context, then returns the result. Running code
* does not have access to the local scope.
*
* If `options` is a string, then it specifies the filename.
*
* The following example compiles and executes code that increments a global
* variable and sets a new one. These globals are contained in the `contextObject`.
*
* ```js
* const vm = require('vm');
*
* const contextObject = {
* animal: 'cat',
* count: 2
* };
*
* vm.runInNewContext('count += 1; name = "kitty"', contextObject);
* console.log(contextObject);
* // Prints: { animal: 'cat', count: 3, name: 'kitty' }
* ```
* @since v0.3.1
* @param code The JavaScript code to compile and run.
* @param contextObject An object that will be `contextified`. If `undefined`, a new object will be created.
* @return the result of the very last statement executed in the script.
*/
function runInNewContext(code: string, contextObject?: Context, options?: RunningCodeInNewContextOptions | string): any;
/**
* `vm.runInThisContext()` compiles `code`, runs it within the context of the
* current `global` and returns the result. Running code does not have access to
* local scope, but does have access to the current `global` object.
*
* If `options` is a string, then it specifies the filename.
*
* The following example illustrates using both `vm.runInThisContext()` and
* the JavaScript [`eval()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/eval) function to run the same code:
*
* ```js
* const vm = require('vm');
* let localVar = 'initial value';
*
* const vmResult = vm.runInThisContext('localVar = "vm";');
* console.log(`vmResult: '${vmResult}', localVar: '${localVar}'`);
* // Prints: vmResult: 'vm', localVar: 'initial value'
*
* const evalResult = eval('localVar = "eval";');
* console.log(`evalResult: '${evalResult}', localVar: '${localVar}'`);
* // Prints: evalResult: 'eval', localVar: 'eval'
* ```
*
     * Because `vm.runInThisContext()` does not have access to the local scope, `localVar` is unchanged. In contrast,
* [`eval()`](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/eval) _does_ have access to the
     * local scope, so the value `localVar` is changed. In this way `vm.runInThisContext()` is much like an [indirect `eval()` call](https://es5.github.io/#x10.4.2), e.g. `(0,eval)('code')`.
*
* ## Example: Running an HTTP server within a VM
*
* When using either `script.runInThisContext()` or {@link runInThisContext}, the code is executed within the current V8 global
* context. The code passed to this VM context will have its own isolated scope.
*
* In order to run a simple web server using the `http` module the code passed to
* the context must either call `require('http')` on its own, or have a reference
* to the `http` module passed to it. For instance:
*
* ```js
* 'use strict';
* const vm = require('vm');
*
* const code = `
* ((require) => {
* const http = require('http');
*
* http.createServer((request, response) => {
* response.writeHead(200, { 'Content-Type': 'text/plain' });
* response.end('Hello World\\n');
* }).listen(8124);
*
* console.log('Server running at http://127.0.0.1:8124/');
* })`;
*
* vm.runInThisContext(code)(require);
* ```
*
* The `require()` in the above case shares the state with the context it is
* passed from. This may introduce risks when untrusted code is executed, e.g.
* altering objects in the context in unwanted ways.
* @since v0.3.1
* @param code The JavaScript code to compile and run.
* @return the result of the very last statement executed in the script.
*/
function runInThisContext(code: string, options?: RunningCodeOptions | string): any;
/**
* Compiles the given code into the provided context (if no context is
* supplied, the current context is used), and returns it wrapped inside a
* function with the given `params`.
* @since v10.10.0
* @param code The body of the function to compile.
* @param params An array of strings containing all parameters for the function.
*/
function compileFunction(code: string, params?: ReadonlyArray<string>, options?: CompileFunctionOptions): Function & {
cachedData?: Script['cachedData'] | undefined;
cachedDataProduced?: Script['cachedDataProduced'] | undefined;
cachedDataRejected?: Script['cachedDataRejected'] | undefined;
};
/**
* Measure the memory known to V8 and used by all contexts known to the
* current V8 isolate, or the main context.
*
* The format of the object that the returned Promise may resolve with is
* specific to the V8 engine and may change from one version of V8 to the next.
*
     * The returned result is different from the statistics returned by
     * `v8.getHeapSpaceStatistics()` in that `vm.measureMemory()` measures the
     * memory reachable by each V8-specific context in the current instance of
     * the V8 engine, while the result of `v8.getHeapSpaceStatistics()` measures
     * the memory occupied by each heap space in the current V8 instance.
*
* ```js
* const vm = require('vm');
* // Measure the memory used by the main context.
* vm.measureMemory({ mode: 'summary' })
* // This is the same as vm.measureMemory()
* .then((result) => {
* // The current format is:
* // {
* // total: {
* // jsMemoryEstimate: 2418479, jsMemoryRange: [ 2418479, 2745799 ]
* // }
* // }
* console.log(result);
* });
*
* const context = vm.createContext({ a: 1 });
* vm.measureMemory({ mode: 'detailed', execution: 'eager' })
* .then((result) => {
* // Reference the context here so that it won't be GC'ed
* // until the measurement is complete.
* console.log(context.a);
* // {
* // total: {
* // jsMemoryEstimate: 2574732,
* // jsMemoryRange: [ 2574732, 2904372 ]
* // },
* // current: {
* // jsMemoryEstimate: 2438996,
* // jsMemoryRange: [ 2438996, 2768636 ]
* // },
* // other: [
* // {
* // jsMemoryEstimate: 135736,
* // jsMemoryRange: [ 135736, 465376 ]
* // }
* // ]
* // }
* console.log(result);
* });
* ```
* @since v13.10.0
* @experimental
*/
function measureMemory(options?: MeasureMemoryOptions): Promise<MemoryMeasurement>;
interface ModuleEvaluateOptions {
timeout?: RunningScriptOptions['timeout'] | undefined;
breakOnSigint?: RunningScriptOptions['breakOnSigint'] | undefined;
}
type ModuleLinker = (specifier: string, referencingModule: Module, extra: { assert: Object }) => Module | Promise<Module>;
type ModuleStatus = 'unlinked' | 'linking' | 'linked' | 'evaluating' | 'evaluated' | 'errored';
class Module {
/**
* The specifiers of all dependencies of this module.
*/
dependencySpecifiers: readonly string[];
/**
* If the `module.status` is `'errored'`, this property contains the exception thrown by the module during evaluation.
* If the status is anything else, accessing this property will result in a thrown exception.
*/
error: any;
/**
* The identifier of the current module, as set in the constructor.
*/
identifier: string;
context: Context;
/**
* The namespace object of the module. This is only available after linking (`module.link()`) has completed.
*/
namespace: Object;
/**
* The current status of the module.
*/
status: ModuleStatus;
/**
* Evaluate the module.
*
* This must be called after the module has been linked; otherwise it will reject
* It could be called also when the module has already been evaluated, in which case it will either do nothing
* if the initial evaluation ended in success (`module.status` is `'evaluated'`) or it will re-throw the exception
* that the initial evaluation resulted in (`module.status` is `'errored'`).
*
* This method cannot be called while the module is being evaluated (`module.status` is `'evaluating'`).
*/
evaluate(options?: ModuleEvaluateOptions): Promise<void>;
/**
* Link module dependencies. This method must be called before evaluation, and can only be called once per module.
*/
link(linker: ModuleLinker): Promise<void>;
}
interface SourceTextModuleOptions {
/**
* String used in stack traces.
* @default 'vm:module(i)' where i is a context-specific ascending index.
*/
identifier?: string | undefined;
cachedData?: ScriptOptions['cachedData'] | undefined;
context?: Context | undefined;
lineOffset?: BaseOptions['lineOffset'] | undefined;
columnOffset?: BaseOptions['columnOffset'] | undefined;
/**
* Called during evaluation of this module to initialize the `import.meta`.
*/
initializeImportMeta?: ((meta: ImportMeta, module: SourceTextModule) => void) | undefined;
importModuleDynamically?: ScriptOptions['importModuleDynamically'] | undefined;
}
class SourceTextModule extends Module {
/**
* Creates a new `SourceTextModule` instance.
* @param code JavaScript Module code to parse
*/
constructor(code: string, options?: SourceTextModuleOptions);
}
interface SyntheticModuleOptions {
/**
* String used in stack traces.
* @default 'vm:module(i)' where i is a context-specific ascending index.
*/
identifier?: string | undefined;
/**
* The contextified object as returned by the `vm.createContext()` method, to compile and evaluate this module in.
*/
context?: Context | undefined;
}
class SyntheticModule extends Module {
/**
* Creates a new `SyntheticModule` instance.
* @param exportNames Array of names that will be exported from the module.
* @param evaluateCallback Called when the module is evaluated.
*/
constructor(exportNames: string[], evaluateCallback: (this: SyntheticModule) => void, options?: SyntheticModuleOptions);
/**
* This method is used after the module is linked to set the values of exports.
* If it is called before the module is linked, an `ERR_VM_MODULE_STATUS` error will be thrown.
* @param name
* @param value
*/
setExport(name: string, value: any): void;
}
}
declare module 'node:vm' {
export * from 'vm';
} | PypiClean |
/CeLEryPy-1.1.1-py3-none-any.whl/CeLEry/util.py | import pandas as pd
import numpy as np
import scipy
import os
import scanpy as sc
from tqdm import tqdm
# from skimage.metrics import structural_similarity as ssim
# import pickle
from math import floor
import json
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
# from seaborn import heatmap as seaheatmap
from scipy.sparse import issparse
import matplotlib.pyplot as plt
# from anndata import AnnData,read_csv,read_text,read_mtx
# from scipy.sparse import issparse
def prefilter_cells(adata,min_counts=None,max_counts=None,min_genes=200,max_genes=None):
if min_genes is None and min_counts is None and max_genes is None and max_counts is None:
raise ValueError('Provide one of min_counts, min_genes, max_counts or max_genes.')
id_tmp=np.asarray([True]*adata.shape[0],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_genes=min_genes)[0]) if min_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_genes=max_genes)[0]) if max_genes is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_cells(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_obs(id_tmp)
adata.raw=sc.pp.log1p(adata,copy=True) #check the rowname
print("the var_names of adata.raw: adata.raw.var_names.is_unique=:",adata.raw.var_names.is_unique)
def prefilter_genes(adata,min_counts=None,max_counts=None,min_cells=10,max_cells=None):
if min_cells is None and min_counts is None and max_cells is None and max_counts is None:
        raise ValueError('Provide one of min_counts, min_cells, max_counts or max_cells.')
id_tmp=np.asarray([True]*adata.shape[1],dtype=bool)
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_cells=min_cells)[0]) if min_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_cells=max_cells)[0]) if max_cells is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,min_counts=min_counts)[0]) if min_counts is not None else id_tmp
id_tmp=np.logical_and(id_tmp,sc.pp.filter_genes(adata.X,max_counts=max_counts)[0]) if max_counts is not None else id_tmp
adata._inplace_subset_var(id_tmp)
def prefilter_specialgenes(adata,Gene1Pattern="ERCC",Gene2Pattern="MT-"):
id_tmp1=np.asarray([not str(name).startswith(Gene1Pattern) for name in adata.var_names],dtype=bool)
id_tmp2=np.asarray([not str(name).startswith(Gene2Pattern) for name in adata.var_names],dtype=bool)
id_tmp=np.logical_and(id_tmp1,id_tmp2)
adata._inplace_subset_var(id_tmp)
def centralize (data):
datanew = data.copy()
for i in tqdm(range(datanew.shape[0])):
z = datanew[i,0,:,:]
zmin = z.min()
zmax = z.max()
if (zmax != zmin):
datanew[i,0,:,:] = (z-zmin)/(zmax-zmin)
else:
datanew[i,0,:,:] = z / (zmax + 1)
return datanew
def centralize2 (data):
datanew = data.copy()
mask = (datanew != 0) * 1
zmin = datanew.min()
zmax = datanew.max()
if (zmax != zmin):
datanew = (datanew-zmin)/(zmax-zmin) * mask
else:
datanew = datanew / (zmax + 1) * mask
return datanew
def getGeneImg (datainput, emptypixel, obsset = None):
# Transform the AnnData file into Genes of images
# datainput: the input data of AnnData object
# obsset: the set of location column names if they are not x_cord, y_cord
# emptypixel: a float that indicate the value on the missing pixel
adata = (datainput.X.A if issparse(datainput.X) else datainput.X)
if obsset is None:
x = datainput.obs["x_cord"]
y = datainput.obs["y_cord"]
else:
x = datainput.obs[obsset[0]]
y = datainput.obs[obsset[1]]
xmin = x.min()
xmax = x.max()
ymin = y.min()
ymax = y.max()
## Append a one-side padding if the axis is odd
## if ((xmax-xmin+1) % 2 == 0):
## xlim = xmax-xmin+2
## else:
xlim = xmax-xmin+1
## if ((ymax-ymin+1) % 2 == 0):
## ylim = ymax-ymin+2
## else:
ylim = ymax-ymin+1
shape = (xlim,ylim)
all_arr = []
firstIteration = True
for i in tqdm(range(adata.shape[1])):
z = adata[:,i]
zmin = z.min()
zmax = z.max()
        # create array for image: emptypixel is the default value for missing spots
img = np.array(np.ones(shape)*emptypixel)
for inp in range(x.shape[0]):
if (z[inp]!=emptypixel):
img[x.iloc[inp]-xmin,y.iloc[inp]-ymin]=z[inp]
all_arr.append(img)
datainput.GeneImg = np.stack(all_arr)
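# Usage sketch (hypothetical AnnData `adata` with integer obs columns "x_cord"/"y_cord"):
#   getGeneImg(adata, emptypixel=0)
#   adata.GeneImg.shape  # -> (n_genes, x_range, y_range)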
# def getGeneImgSparse (adata, emptypixel):
# # Transform the AnnData file into Genes of images for sparse matrix format
# # adata: the input data of AnnData object
# # emptypixel: a float that indicate the value on the missing pixel
# x = adata.obs.iloc[:,0]
# y = adata.obs.iloc[:,0]
# xmin = x.min().iloc[0]
# xmax = x.max().iloc[0]
# ymin = y.min().iloc[0]
# ymax = y.max().iloc[0]
# ## Append a one-side padding if the axis is odd
# if ((xmax-xmin+1) % 2 == 0):
# xlim = xmax-xmin+2
# else:
# xlim = xmax-xmin+1
# if ((ymax-ymin+1) % 2 == 0):
# ylim = ymax-ymin+2
# else:
# ylim = ymax-ymin+1
# shape = (xlim,ylim)
# all_arr = []
# firstIteration = True
# for i in tqdm(range(adata.X.shape[1])):
# z = adata.X[:,i]
# zmin = z.min()
# zmax = z.max()
# # create array for image : zmax+1 is the default value
# img = np.array(np.ones(shape)*emptypixel)
# for inp in range(x.shape[0]):
# if (z[inp,0]!=emptypixel):
# img[x.iloc[inp]-xmin,y.iloc[inp]-ymin]=z[inp,0]
# all_arr.append(img)
# adata.GeneImg = np.stack(all_arr)
def plotGeneImg (img, filename = None, range = None, plotcolor = 'YlGnBu'):
# set mask on default value
# img.mask = (img==0)
plt.figure()
shape = img.shape
# set a gray background for test
img_bg_test = np.zeros(shape)
cmap_bg_test = plt.get_cmap('gray')
plt.imshow(img_bg_test,cmap=cmap_bg_test,interpolation='none',vmin=0,vmax=6)
# plot
cmap = plt.get_cmap(plotcolor)
plt.imshow(img,cmap=cmap,interpolation='none')
if range is not None:
plt.clim(range[0], range[1])
plt.colorbar()
if filename is None:
plt.show()
else:
plt.savefig(filename + '.pdf')
def plotarrangefile (bdataexpand, foldername, label, Path = "../output/"):
try:
os.mkdir( Path + foldername)
except FileExistsError:
print("Folder already exists")
except FileNotFoundError:
print("The path before foldername is not found.")
else:
print ("Folder {foldername} is successfully created".format(foldername = foldername))
ncategory = np.bincount(label).shape[0]
for i in range(ncategory):
try:
os.mkdir(Path + "{foldername}/{labels}/".format(foldername = foldername, labels=i))
except FileExistsError:
print("Folder for group {i} already exists".format(i = i))
except FileNotFoundError:
print("The path before foldername is not found.")
else:
print ("Folder group {i} is successfully created".format(i = i))
for i in tqdm(range(bdataexpand.shape[0])):
plotGeneImg(bdataexpand[i,0,:,:], filename = Path + "{foldername}/{labels}/fig{i}".format(foldername = foldername, labels = label[i], i = i))
def get_zscore (adata, mean = None, sd = None ):
genotypedata = (adata.X.A if issparse(adata.X) else adata.X)
if mean is None:
genemean = np.mean(genotypedata, axis =0)
genesd = np.std(genotypedata, axis = 0)
else:
genemean = mean
genesd = sd
try:
if adata.standardize is not True:
datatransform = (genotypedata - genemean) / genesd
adata.X = datatransform
adata.genemean = genemean
adata.genesd = genesd
adata.standardize = True
else:
print("Data has already been z-scored")
except AttributeError:
datatransform = (genotypedata - genemean) / genesd
adata.X = datatransform
adata.genemean = genemean
adata.genesd = genesd
adata.standardize = True
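# Usage sketch: z-score a reference set, then reuse its statistics on a query set.
#   get_zscore(adata_ref)                                   # fit mean/sd on the reference
#   get_zscore(adata_query, mean=adata_ref.genemean, sd=adata_ref.genesd)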
def get_histlgy_color (coords, refer, histimage, beta = 49):
"""
According to the predicted coordinates. Get histology image from the coordinates.
:coords: numpy [Length_Locations x 2]: the predicted coordinates. Each cell in [0,1]
:refer: dataframe [Length_Locations x 2]: the true location in the data
:beta: int [1]: to control the range of neighbourhood when calculate grey vale for one spot
:histimage: numpy [xlen x ylen]: Histology data
:return: numpy [ xlen x ylen x 3 (RGB values)]
"""
beta_half=round(beta/2)
imageshape = histimage.shape
maxx = imageshape[0]
maxy = imageshape[1]
referx = refer.iloc[:,0]
refery = refer.iloc[:,1]
referxmin = referx.min()
referxmax = referx.max()
referymin = refery.min()
referymax = refery.max()
xlen = referxmax - referxmin + 1
ylen = referymax - referymin + 1
    canvus = np.array(np.ones((xlen,ylen,3))*255) ## background color: white = 255, black = 0
for i in range(coords.shape[0]):
# Step 1: Capture the corresponding from the histology information
x_pixel_pred = round(coords[i,0]*maxx)
y_pixel_pred = round(coords[i,1]*maxy)
subimage = histimage[max(0,x_pixel_pred-beta_half):min(maxx,x_pixel_pred+beta_half+1), max(0,y_pixel_pred-beta_half):min(maxy,y_pixel_pred+beta_half+1)]
subimage_mean = np.mean(np.mean(subimage, axis = 0), axis = 0)
# Place the color on the canvus of original map
referx_current = refer.iloc[i,0]
refery_current = refer.iloc[i,1]
canvus[referx_current - referxmin, refery_current - referymin,:] = subimage_mean
return(canvus)
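# Usage sketch (hypothetical arrays): paint predicted spots with local histology colour.
#   canvas = get_histlgy_color(coords_pred, refer_df, hist_img, beta=49)
#   printimage(canvas, "../output/hist_pred")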
def printimage (image, path):
"""
According to the predicted coordinates. Get histology image from the coordinates.
:image: numpy [xlen x ylen x 3 (RGB values)]: the image
:path: string [1]: the path to print the plot
"""
plt.imshow(image/255)
plt.savefig(path + '.pdf')
def closest_node(node, nodes):
dist_2 = np.sum((nodes - node)**2, axis=1)
return np.argmin(dist_2)
def get_Layer_LIBD (adata, coords_predict, referann):
"""
Get the layer label of the LIBD data
:adata: the main adata that are working with
:coords_predict: Numpy [n x 2]: the predicted coordinates based on deep neural network
:referann: AnnData: the AnnData for the reference data. Usually the training data
"""
referlocation = referann.obs.copy()
referx = referlocation.iloc[:,0]
refery = referlocation.iloc[:,1]
referxmin = referx.min()
referxmax = referx.max()
referymin = refery.min()
referymax = refery.max()
xlen = referxmax - referxmin + 1
ylen = referymax - referymin + 1
# Normalized the dictionary coordinates
referlocation.iloc[:,0] = (referlocation.iloc[:,0] - referxmin) / xlen
referlocation.iloc[:,1] = (referlocation.iloc[:,1] - referymin) / ylen
reloc_np = referlocation.to_numpy()
reloc_np = reloc_np[:,0:2]
    # Find the closest point in the dictionary for each predicted coordinate
    pred = np.zeros(coords_predict.shape[0], dtype=int)  # integer dtype: iloc needs integer indexers
for i in range(coords_predict.shape[0]):
pred[i] = closest_node(coords_predict[i,:] , reloc_np)
# map the coordinates
pred_layer = referlocation.iloc[pred,2]
adata.obs["pred_layer"] = pred_layer.to_numpy()
return pred_layer
def plot_layer (adata, folder, name, coloruse):
if coloruse is None:
colors_use = ['#46327e', '#365c8d', '#277f8e', '#1fa187', '#4ac16d', '#a0da39', '#fde725', '#ffbb78', '#2ca02c', '#ff7f0e', '#1f77b4', '#800080', '#959595', '#ffff00', '#014d01', '#0000ff', '#ff0000', '#000000']
else:
colors_use = coloruse
# colors_use = ['#111010', '#FFFF00', '#4a6fe3', '#bb7784', '#bec1d4', '#ff9896', '#98df8a', '#ffbb78', '#2ca02c', '#ff7f0e', '#1f77b4', '#800080', '#959595', '#ffff00', '#014d01', '#0000ff', '#ff0000', '#000000']
num_celltype = 7 # len(adata.obs["pred_layer"].unique())
adata.uns["pred_layer_str_colors"]=list(colors_use[:num_celltype])
cdata = adata.copy()
cdata.obs["x4"] = cdata.obs["x2"]*50
cdata.obs["x5"] = cdata.obs["x3"]*50
fig=sc.pl.scatter(cdata, alpha = 1, x = "x5", y = "x4", color = "pred_layer_str", palette = colors_use, show = False, size = 50)
fig.set_aspect('equal', 'box')
fig.figure.savefig("{path}/{name}_Layer_pred.pdf".format(path = folder, name = name), dpi = 300)
cdata.obs["Layer"] = cdata.obs["Layer"].astype(int).astype('str')
fig2=sc.pl.scatter(cdata, alpha = 1, x = "x5", y = "x4", color = "Layer", palette = colors_use, show = False, size = 50)
fig2.set_aspect('equal', 'box')
fig2.figure.savefig("{path}/{name}_Layer_ref.pdf".format(path = folder, name = name), dpi = 300)
def plot_confusion_matrix(referadata, filename, nlayer = 7):
""" Plot the confusion matrix
:referadata: the AnnData carrying both the reference `Layer` and the `pred_layer` labels
:filename: string: output path prefix for the .csv matrix and the .json metrics
"""
labellist = [i+1 for i in range(nlayer)]
conf_mat = confusion_matrix(referadata.obs[["Layer"]], referadata.obs[["pred_layer"]], labels = labellist)
conf_mat_perc = conf_mat / conf_mat.sum(axis=1, keepdims=True) # transform the matrix to be row percentage
conf_mat_CR = classification_report(referadata.obs[["Layer"]], referadata.obs[["pred_layer"]], output_dict=True, labels = labellist)
np.savetxt('{filename}.csv'.format(filename = filename), conf_mat_perc, delimiter=',')
with open('{filename}_Classification_Metric.json'.format(filename = filename), 'w') as fp:
json.dump(conf_mat_CR, fp)
# plt.figure()
# conf_mat_fig = seaheatmap(conf_mat_perc, annot=True, cmap='Blues')
# confplot = conf_mat_fig.get_figure()
# confplot.savefig("{filename}.png".format(filename = filename), dpi=400)
def make_annData_spatial (adata, spatial, min_cells = 3, filtered = False):
"""
adata: an AnnData object for the transcriptomics data
spatial: a pandas dataframe recording the location information for each spot
"""
if not filtered:
adata.obs["select"] = spatial[1]
adata.obs["x_cord"] = spatial[2]
adata.obs["y_cord"] = spatial[3]
adata.obs["x_pixel"] = spatial[4]
adata.obs["y_pixel"] = spatial[5]
# Select captured samples
adata = adata[adata.obs["select"] == 1]
else:
spatialsub = spatial[spatial.iloc[:,0] == 1]
adata.obs = adata.obs.join(spatialsub)
adata.obs.columns = ['select', 'x_cord', 'y_cord', 'x_pixel', 'y_pixel']
adata.var_names = [i.upper() for i in list(adata.var_names)]
adata.var["genename"] = adata.var.index.astype("str")
#
adata.var_names_make_unique()
prefilter_genes(adata, min_cells=min_cells) # drop genes detected in fewer than min_cells cells
prefilter_specialgenes(adata)
#Normalize and take log for UMI
sc.pp.normalize_per_cell(adata)
sc.pp.log1p(adata)
return adata
def make_annData_query (adata):
"""
adata: an AnnData object for the scRNA data
"""
adata.var_names = [i.upper() for i in list(adata.var_names)]
adata.var["genename"] = adata.var.index.astype("str")
#
adata.var_names_make_unique()
prefilter_genes(adata, min_cells=3) # drop genes detected in fewer than 3 cells
prefilter_specialgenes(adata)
#Normalize and take log for UMI
sc.pp.normalize_per_cell(adata)
sc.pp.log1p(adata)
return adata
/EOxServer-1.2.12-py3-none-any.whl/eoxserver/services/ows/wps/parameters/__init__.py
from .base import Parameter
from .literaldata import LiteralData
from .complexdata import (
ComplexData, CDBase, CDObject, CDTextBuffer, CDByteBuffer,
CDAsciiTextBuffer, CDFileWrapper, CDFile, CDPermanentFile,
)
from .formats import (
Format, FormatText, FormatXML, FormatJSON,
FormatBinaryRaw, FormatBinaryBase64,
)
from .codecs import Codec, CodecBase64, CodecRaw
from .bboxdata import BoundingBox, BoundingBoxData
from .units import UnitOfMeasure, UnitLinear
from .allowed_values import (
BaseAllowed, AllowedAny, AllowedEnum, AllowedRange,
AllowedRangeCollection, AllowedByReference
)
from .data_types import (
DTYPES, BaseType, Boolean, Integer, Double, String,
Duration, Date, Time, DateTime, DateTimeTZAware
)
from .crs import CRSType
from .inputs import InputReference, InputData
from .response_form import (
Output, ResponseForm, ResponseDocument, RawDataOutput
)
class RequestParameter(object):
""" Special input parameter extracting input from the request metadata.
This might be used to pass information such as, e.g., HTTP headers or
user authentication to the process like a regular input variable.
This class is the base class; it is expected that the `parse_request`
method be overridden, either through inheritance or via a function
passed as an argument to the constructor.
"""
# pylint: disable=method-hidden, too-few-public-methods
def __init__(self, request_parser=None):
if request_parser:
self.parse_request = request_parser
def parse_request(self, request):
""" Method extracting information from the Django HTTP request object.
"""
raise NotImplementedError
def fix_parameter(name, prm):
""" Expand short-hand definition of the parameter."""
if isinstance(prm, Parameter):
return prm
elif isinstance(prm, RequestParameter):
# The leading backslash indicates an internal parameter.
# Note: backslash is not an allowed URI character and it cannot appear
# in the WPS inputs' names.
prm.identifier = "\\" + name
return prm
else:
return LiteralData(name, dtype=prm)
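# Illustrative shorthand expansion (a sketch; actual dtype handling is up to LiteralData):
# fix_parameter("count", int) is equivalent to LiteralData("count", dtype=int),
# while Parameter instances pass through unchanged and RequestParameter instances
# get their identifier prefixed with a backslash.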
class Reference(object):
""" Output reference. An instance of this class defines a CommplexData
output passed by a reference. The output must be stored in a file.
Constructor parameters:
path path to the output file in the local file-system
href public URL of the output reference
mime_type output ComplexData mime-type
encoding output ComplexData encoding
schema output ComplexData schema
"""
# pylint: disable=too-few-public-methods, too-many-arguments
def __init__(self, path, href, mime_type=None, encoding=None, schema=None,
**kwargs):
self.path = path
self.href = href
self.mime_type = mime_type
self.encoding = encoding
self.schema = schema
/Kallithea-0.7.0.tar.gz/Kallithea-0.7.0/kallithea/lib/indexers/daemon.py
import logging
import os
import traceback
from shutil import rmtree
from time import mktime
from tg import config
from whoosh.index import create_in, exists_in, open_dir
from whoosh.qparser import QueryParser
from kallithea.lib import celerylib
from kallithea.lib.conf import INDEX_EXTENSIONS, INDEX_FILENAMES
from kallithea.lib.indexers import CHGSET_IDX_NAME, CHGSETS_SCHEMA, IDX_NAME, SCHEMA
from kallithea.lib.utils2 import safe_str
from kallithea.lib.vcs.exceptions import ChangesetDoesNotExistError, ChangesetError, NodeDoesNotExistError, RepositoryError
from kallithea.model import db
from kallithea.model.scm import ScmModel
log = logging.getLogger('whoosh_indexer')
class WhooshIndexingDaemon(object):
"""
Daemon for atomic indexing jobs
"""
def __init__(self, indexname=IDX_NAME, index_location=None,
repo_location=None, repo_list=None,
repo_update_list=None):
self.indexname = indexname
self.index_location = index_location
if not index_location:
raise Exception('You have to provide index location')
self.repo_location = repo_location
if not repo_location:
raise Exception('You have to provide repositories location')
self.repo_paths = ScmModel().repo_scan(self.repo_location)
# filter repo list
if repo_list:
repo_list = set(repo_list)
self.filtered_repo_paths = {}
for repo_name, repo in self.repo_paths.items():
if repo_name in repo_list:
self.filtered_repo_paths[repo_name] = repo
self.repo_paths = self.filtered_repo_paths
# filter update repo list
self.filtered_repo_update_paths = {}
if repo_update_list:
for repo_name, repo in self.repo_paths.items():
if repo_name in repo_update_list:
self.filtered_repo_update_paths[repo_name] = repo
self.repo_paths = self.filtered_repo_update_paths
self.initial = True
if not os.path.isdir(self.index_location):
os.makedirs(self.index_location)
log.info('Cannot run incremental index since it does not '
'yet exist - running full build')
elif not exists_in(self.index_location, IDX_NAME):
log.info('Running full index build, as the file content '
'index does not exist')
elif not exists_in(self.index_location, CHGSET_IDX_NAME):
log.info('Running full index build, as the changeset '
'index does not exist')
else:
self.initial = False
def _get_index_revision(self, repo):
db_repo = db.Repository.get_by_repo_name(repo.name)
landing_rev = 'tip'
if db_repo:
_rev_type, _rev = db_repo.landing_rev
landing_rev = _rev
return landing_rev
def _get_index_changeset(self, repo, index_rev=None):
if not index_rev:
index_rev = self._get_index_revision(repo)
cs = repo.get_changeset(index_rev)
return cs
def get_paths(self, repo):
"""
recursive walk in root dir and return a set of all path in that dir
based on repository walk function
"""
index_paths_ = set()
try:
cs = self._get_index_changeset(repo)
for _topnode, _dirs, files in cs.walk('/'):
for f in files:
index_paths_.add(os.path.join(repo.path, f.path))
except RepositoryError:
log.debug(traceback.format_exc())
return index_paths_
def get_node(self, repo, path, index_rev=None):
"""
gets a filenode based on given full path.
:param repo: scm repo instance
:param path: full path including root location
:return: FileNode
"""
# FIXME: paths should be normalized ... or even better: don't include repo.path
assert path.startswith(repo.path)
assert path[len(repo.path)] in (os.path.sep, os.path.altsep)
node_path = path[len(repo.path) + 1:]
cs = self._get_index_changeset(repo, index_rev=index_rev)
node = cs.get_node(node_path)
return node
def is_indexable_node(self, node):
"""
Just index the content of chosen files, skipping binary files
"""
return (node.extension in INDEX_EXTENSIONS or node.name in INDEX_FILENAMES) and \
not node.is_binary
def get_node_mtime(self, node):
return mktime(node.last_changeset.date.timetuple())
def add_doc(self, writer, path, repo, repo_name, index_rev=None):
"""
Adding doc to writer this function itself fetches data from
the instance of vcs backend
"""
try:
node = self.get_node(repo, path, index_rev)
except (ChangesetError, NodeDoesNotExistError):
log.debug(" >> %s - not found in %s %s", path, repo, index_rev)
return 0, 0
indexed = indexed_w_content = 0
if self.is_indexable_node(node):
bytes_content = node.content
if b'\0' in bytes_content:
log.warning(' >> %s - no text content', path)
u_content = ''
else:
log.debug(' >> %s', path)
u_content = safe_str(bytes_content)
indexed_w_content += 1
else:
log.debug(' >> %s - not indexable', path)
# just index the file name without its content
u_content = ''
indexed += 1
writer.add_document(
fileid=path,
repository_rawname=repo_name,
repository=repo_name,
path=path,
content=u_content,
modtime=self.get_node_mtime(node),
extension=node.extension
)
return indexed, indexed_w_content
def index_changesets(self, writer, repo_name, repo, start_rev=None):
"""
Add all changeset in the vcs repo starting at start_rev
to the index writer
:param writer: the whoosh index writer to add to
:param repo_name: name of the repository from whence the
changeset originates including the repository group
:param repo: the vcs repository instance to index changesets for,
the presumption is the repo has changesets to index
:param start_rev: the full sha id to start indexing from;
if start_rev is None then index from the first changeset in
the repo
"""
if start_rev is None:
start_rev = repo[0].raw_id
log.debug('Indexing changesets in %s, starting at rev %s',
repo_name, start_rev)
indexed = 0
cs_iter = repo.get_changesets(start=start_rev)
total = len(cs_iter)
for cs in cs_iter:
indexed += 1
log.debug(' >> %s %s/%s', cs, indexed, total)
writer.add_document(
raw_id=cs.raw_id,
date=cs._timestamp,
repository_rawname=repo_name,
repository=repo_name,
author=cs.author,
message=cs.message,
last=cs.last,
added=' '.join(node.path for node in cs.added).lower(),
removed=' '.join(node.path for node in cs.removed).lower(),
changed=' '.join(node.path for node in cs.changed).lower(),
parents=' '.join(p.raw_id for p in cs.parents),  # avoid shadowing the outer cs
)
return indexed
def index_files(self, file_idx_writer, repo_name, repo):
"""
Index files for given repo_name
:param file_idx_writer: the whoosh index writer to add to
:param repo_name: name of the repository we're indexing
:param repo: instance of vcs repo
"""
i_cnt = iwc_cnt = 0
log.debug('Building file index for %s @revision:%s', repo_name,
self._get_index_revision(repo))
index_rev = self._get_index_revision(repo)
for idx_path in self.get_paths(repo):
i, iwc = self.add_doc(file_idx_writer, idx_path, repo, repo_name, index_rev)
i_cnt += i
iwc_cnt += iwc
log.debug('added %s files %s with content for repo %s',
i_cnt + iwc_cnt, iwc_cnt, repo.path)
return i_cnt, iwc_cnt
def update_changeset_index(self):
idx = open_dir(self.index_location, indexname=CHGSET_IDX_NAME)
with idx.searcher() as searcher:
writer = idx.writer()
writer_is_dirty = False
try:
indexed_total = 0
repo_name = None
for repo_name, repo in sorted(self.repo_paths.items()):
log.debug('Updating changeset index for repo %s', repo_name)
# skip indexing if there aren't any revs in the repo
num_of_revs = len(repo)
if num_of_revs < 1:
continue
qp = QueryParser('repository', schema=CHGSETS_SCHEMA)
q = qp.parse("last:t AND %s" % repo_name)
results = searcher.search(q)
# default to scanning the entire repo
last_rev = 0
start_id = None
if len(results) > 0:
# assuming that there is only one result, if not this
# may require a full re-index.
start_id = results[0]['raw_id']
try:
last_rev = repo.get_changeset(revision=start_id).revision
except ChangesetDoesNotExistError:
log.error('previous last revision %s not found - indexing from scratch', start_id)
start_id = None
# there are new changesets to index or a new repo to index
if last_rev == 0 or num_of_revs > last_rev + 1:
# delete the docs in the index for the previous
# last changeset(s)
for hit in results:
q = qp.parse("last:t AND %s AND raw_id:%s" %
(repo_name, hit['raw_id']))
writer.delete_by_query(q)
# index from the previous last changeset + all new ones
indexed_total += self.index_changesets(writer,
repo_name, repo, start_id)
writer_is_dirty = True
log.debug('indexed %s changesets for repo %s',
indexed_total, repo_name
)
finally:
if writer_is_dirty:
log.debug('>> COMMITTING CHANGES TO CHANGESET INDEX <<')
writer.commit(merge=True)
log.debug('>>> FINISHED REBUILDING CHANGESET INDEX <<<')
else:
log.debug('>> NOTHING TO COMMIT TO CHANGESET INDEX <<')
def update_file_index(self):
log.debug('STARTING INCREMENTAL INDEXING UPDATE FOR EXTENSIONS %s '
'AND REPOS %s', INDEX_EXTENSIONS, ' and '.join(self.repo_paths))
idx = open_dir(self.index_location, indexname=self.indexname)
# The set of all paths in the index
indexed_paths = set()
# The set of all paths we need to re-index
to_index = set()
writer = idx.writer()
writer_is_dirty = False
try:
with idx.reader() as reader:
# Loop over the stored fields in the index
for fields in reader.all_stored_fields():
indexed_path = fields['path']
indexed_repo_path = fields['repository']
indexed_paths.add(indexed_path)
if indexed_repo_path not in self.filtered_repo_update_paths:
continue
repo = self.repo_paths[indexed_repo_path]
try:
node = self.get_node(repo, indexed_path)
# Check if this file was changed since it was indexed
indexed_time = fields['modtime']
mtime = self.get_node_mtime(node)
if mtime > indexed_time:
# The file has changed, delete it and add it to
# the list of files to reindex
log.debug(
'adding to reindex list %s mtime: %s vs %s',
indexed_path, mtime, indexed_time
)
writer.delete_by_term('fileid', indexed_path)
writer_is_dirty = True
to_index.add(indexed_path)
except (ChangesetError, NodeDoesNotExistError):
# This file was deleted since it was indexed
log.debug('removing from index %s', indexed_path)
writer.delete_by_term('path', indexed_path)
writer_is_dirty = True
# Loop over the files in the filesystem
# Assume we have a function that gathers the filenames of the
# documents to be indexed
ri_cnt_total = 0 # indexed
riwc_cnt_total = 0 # indexed with content
for repo_name, repo in sorted(self.repo_paths.items()):
log.debug('Updating file index for repo %s', repo_name)
# skip indexing if there aren't any revisions
if len(repo) < 1:
continue
ri_cnt = 0 # indexed
riwc_cnt = 0 # indexed with content
for path in self.get_paths(repo):
if path in to_index or path not in indexed_paths:
# This is either a file that's changed, or a new file
# that wasn't indexed before. So index it!
i, iwc = self.add_doc(writer, path, repo, repo_name)
writer_is_dirty = True
ri_cnt += i
ri_cnt_total += 1
riwc_cnt += iwc
riwc_cnt_total += iwc
log.debug('added %s files %s with content for repo %s',
ri_cnt + riwc_cnt, riwc_cnt, repo.path
)
log.debug('indexed %s files in total and %s with content',
ri_cnt_total, riwc_cnt_total
)
finally:
if writer_is_dirty:
log.debug('>> COMMITTING CHANGES TO FILE INDEX <<')
writer.commit(merge=True)
log.debug('>>> FINISHED REBUILDING FILE INDEX <<<')
else:
log.debug('>> NOTHING TO COMMIT TO FILE INDEX <<')
writer.cancel()
def build_indexes(self):
if os.path.exists(self.index_location):
log.debug('removing previous index')
rmtree(self.index_location)
if not os.path.exists(self.index_location):
os.mkdir(self.index_location)
chgset_idx = create_in(self.index_location, CHGSETS_SCHEMA,
indexname=CHGSET_IDX_NAME)
chgset_idx_writer = chgset_idx.writer()
file_idx = create_in(self.index_location, SCHEMA, indexname=IDX_NAME)
file_idx_writer = file_idx.writer()
log.debug('BUILDING INDEX FOR EXTENSIONS %s '
'AND REPOS %s', INDEX_EXTENSIONS, ' and '.join(self.repo_paths))
for repo_name, repo in sorted(self.repo_paths.items()):
log.debug('Updating indices for repo %s', repo_name)
# skip indexing if there aren't any revisions
if len(repo) < 1:
continue
self.index_files(file_idx_writer, repo_name, repo)
self.index_changesets(chgset_idx_writer, repo_name, repo)
log.debug('>> COMMITING CHANGES <<')
file_idx_writer.commit(merge=True)
chgset_idx_writer.commit(merge=True)
log.debug('>>> FINISHED BUILDING INDEX <<<')
def update_indexes(self):
self.update_file_index()
self.update_changeset_index()
def run(self, full_index=False):
"""Run daemon"""
if full_index or self.initial:
self.build_indexes()
else:
self.update_indexes()
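# Typical standalone use (a sketch; both locations are placeholder paths):
# WhooshIndexingDaemon(index_location='/srv/index',
#                      repo_location='/srv/repos').run(full_index=True)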
@celerylib.task
@celerylib.locked_task
def whoosh_index(repo_location, full_index):
index_location = config['index_dir']
WhooshIndexingDaemon(index_location=index_location,
repo_location=repo_location) \
.run(full_index=full_index)
/Haus-0.1.0.tar.gz/Haus-0.1.0/haus/components/abstract.py
try:
from functools import wraps, update_wrapper, \
WRAPPER_ASSIGNMENTS, WRAPPER_UPDATES
except ImportError, ie:
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper, wrapped, assigned=None, updated=None):
if assigned is None:
assigned = WRAPPER_ASSIGNMENTS
if updated is None:
updated = WRAPPER_UPDATES
for attr in assigned:
try:
setattr(wrapper, attr, getattr(wrapped, attr))
except:
pass
for attr in updated:
try:
getattr(wrapper, attr).update(getattr(wrapped, attr))
except:
pass
return wrapper
def wraps(wrapped, assigned=None, updated=None):
def proxy(wrapper):
return update_wrapper(wrapper, wrapped, assigned, updated)
return update_wrapper(proxy, wrapped)
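# The fallback mirrors functools.wraps; a minimal usage sketch:
#
# def traced(func):
#     @wraps(func)
#     def wrapper(*args, **kwargs):
#         print 'calling', func.__name__
#         return func(*args, **kwargs)
#     return wrapper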
class HausComponentError(Exception): pass
class Component(object):
"""Abstract base class for haus components."""
provides = []
consumes = []
def __init__(self, wrk):
"""Stuff that happens when the application is loaded.
Register functions on the framework instance,
initialize this component at load time...
Be aware that often you must load those components
which provide framework functions before those which
consume them. This is a do-nothing by default.
"""
def __call__(self, wrk, *args, **kwargs):
"""Provide wrapper when individual handler is loaded.
In other words, this function is used by the stacker
to get the middleware to stack. How to provide that
is totally up to the component. If this is not
implemented for a component and you try to stack
it, it will raise a :class:`HausComponentError`.
"""
raise HausComponentError(
"Component %s cannot be stacked."
% getattr(self, '__name__', self.__class__.__name__)
)
/Faker-19.3.1.tar.gz/Faker-19.3.1/faker/providers/ssn/nl_BE/__init__.py
from .. import Provider as SsnProvider
"""
For more info on rijksregisternummer, see https://nl.wikipedia.org/wiki/Rijksregisternummer
Dutch/French only for now ...
"""
class Provider(SsnProvider):
def ssn(self) -> str:
"""
Returns an 11 digit Belgian SSN called "rijksregisternummer" as a string
The first 6 digits represent the birthdate with (in order) year, month and day.
The second group of 3 digits represents a sequence number (order of birth).
It is even for women and odd for men.
For men the range starts at 1 and ends at 997, for women 2 until 998.
The third group of 2 digits is a checksum based on the previous 9 digits (modulo 97).
Divide those 9 digits by 97, subtract the remainder from 97 and that's the result.
For persons born in or after 2000, the 9 digit number needs to be preceded by a 2
(add 2000000000) before the division by 97.
"""
# see https://nl.wikipedia.org/wiki/Rijksregisternummer (in Dutch)
def _checksum(digits):
res = 97 - (digits % 97)
return res
# Generate a date (random)
mydate = self.generator.date()
# Convert it to an int
elms = mydate.split("-")
# Adjust for year 2000 if necessary
if elms[0][0] == "2":
above = True
else:
above = False
# Only keep the last 2 digits of the year
elms[0] = elms[0][2:4]
# Simulate the gender/sequence - should be 3 digits
seq = self.generator.random_int(1, 998)
# Right justify sequence and append to list
seq_str = f"{seq:0>3}"
elms.append(seq_str)
# Now convert list to an integer so the checksum can be calculated
date_as_int = int("".join(elms))
if above:
date_as_int += 2000000000
# Generate checksum
s = _checksum(date_as_int)
s_rjust = f"{s:0>2}"
# return result as a string
elms.append(s_rjust)
return "".join(elms)
vat_id_formats = ("BE##########",)
def vat_id(self) -> str:
"""
http://ec.europa.eu/taxation_customs/vies/faq.html#item_11
:return: A random Belgian VAT ID
"""
return self.bothify(self.random_element(self.vat_id_formats))
/ChatPDF-2023.4.25.9.25.56-py3-none-any.whl/chatllm/uis/gradio_ui.py
import gradio as gr
import mdtex2html
########################################################################
from chatllm.utils import llm_load, llm_load4chat
from chatllm.applications import Chat
MODEL = '/Users/betterme/PycharmProjects/AI/CHAT_MODEL/chatglm'
chat_func = llm_load4chat(MODEL, device='mps')
qa = Chat(chat_func)
########################################################################
"""Override Chatbot.postprocess"""
def postprocess(self, y):
if y is None:
return []
for i, (message, response) in enumerate(y):
y[i] = (
None if message is None else mdtex2html.convert(message),
None if response is None else mdtex2html.convert(response),
)
return y
gr.Chatbot.postprocess = postprocess
def parse_text(text):
"""copy from https://github.com/GaiZhenbiao/ChuanhuChatGPT/"""
lines = text.split("\n")
lines = [line for line in lines if line != ""]
count = 0
for i, line in enumerate(lines):
if "```" in line:
count += 1
items = line.split('`')
if count % 2 == 1:
lines[i] = f'<pre><code class="language-{items[-1]}">'
else:
lines[i] = f'<br></code></pre>'
else:
if i > 0:
if count % 2 == 1:
line = line.replace("`", "\`")
line = line.replace("<", "<")
line = line.replace(">", ">")
line = line.replace(" ", " ")
line = line.replace("*", "*")
line = line.replace("_", "_")
line = line.replace("-", "-")
line = line.replace(".", ".")
line = line.replace("!", "!")
line = line.replace("(", "(")
line = line.replace(")", ")")
line = line.replace("$", "$")
lines[i] = "<br>" + line
text = "".join(lines)
return text
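# Rough sketch of the transformation performed above:
# parse_text("```python\nprint(1)\n```") yields
# '<pre><code class="language-python"><br>print&#40;1&#41;<br></code></pre>',
# i.e. fenced blocks become <pre><code> and special characters inside them are
# HTML-escaped so the chatbot renders them literally.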
def predict(input, chatbot, max_length, top_p, temperature, history, knowledge_base=''):
chatbot.append((parse_text(input), ""))
qa.set_chat_kwargs(max_length=max_length, top_p=top_p, temperature=temperature)
for response, history in qa(query=input, knowledge_base=knowledge_base):
chatbot[-1] = (parse_text(input), parse_text(response))
yield chatbot, history
def reset_user_input():
return gr.update(value='')
def reset_state():
return [], []
with gr.Blocks() as demo:
gr.HTML("""<h1 align="center">LLM4CHAT</h1>""")
chatbot = gr.Chatbot()
with gr.Row():
with gr.Column(scale=4):
# with gr.Column(scale=2):
# knowledge_base = gr.Textbox(show_label=False, placeholder="Enter knowledge", lines=10).style(container=False)
with gr.Column(scale=12):
user_input = gr.Textbox(show_label=False, placeholder="Enter a question", lines=20).style(container=False)
with gr.Column(min_width=32, scale=1):
submitBtn = gr.Button("Submit", variant="primary")
with gr.Column(scale=1):
emptyBtn = gr.Button("Clear History")
max_length = gr.Slider(0, 4096, value=2048, step=1.0, label="Maximum length", interactive=True)
top_p = gr.Slider(0, 1, value=0.7, step=0.01, label="Top P", interactive=True)
temperature = gr.Slider(0, 1, value=0.95, step=0.01, label="Temperature", interactive=True)
knowledge_base = gr.Textbox(label='📚Knowledge base', placeholder="Enter knowledge", lines=10).style(container=False)
history = gr.State([])
submitBtn.click(predict, [user_input, chatbot, max_length, top_p, temperature, history, knowledge_base],
[chatbot, history],
show_progress=True)
submitBtn.click(reset_user_input, [], [user_input])
emptyBtn.click(reset_state, outputs=[chatbot, history], show_progress=True)
demo.queue().launch(share=False, inbrowser=True, debug=True)
/Auto_FOX-1.0.0b1-py3-none-any.whl/FOX/recipes/psf.py
from __future__ import annotations
import math
import warnings
from types import MappingProxyType
from typing import (Union, Iterable, Optional, Callable, Mapping, Type, Iterator, TypeVar,
Any, Tuple, List, cast, Sequence, Dict, TYPE_CHECKING)
from itertools import chain
from collections import abc
import numpy as np
from scm.plams import Molecule, Atom, Bond, MoleculeError, PT
from nanoutils import group_by_values, PathType, raise_if
from FOX import PSFContainer
from FOX.io.read_psf import overlay_rtf_file, overlay_str_file
from FOX.functions.molecule_utils import fix_bond_orders
from FOX.armc.sanitization import _assign_residues
try:
from scm.plams import from_smiles, to_rdmol
from rdkit.Chem import Mol
except ImportError as ex:
RDKIT_EX: None | ImportError = ex
else:
def _get_boost_exc() -> type[TypeError]:
"""Return the Boost ``ArgumentError`` type."""
mol = to_rdmol(from_smiles("C"))
atom = mol.GetAtoms()[0]
ret: None | type[TypeError] = None
try:
# Trigger an ArgumentError by setting a non-integer charge
atom.SetFormalCharge(0.5)
except Exception as ex:
ret = type(ex)
if (ret.__qualname__ == "ArgumentError") and (ret.__module__ == "Boost.Python"):
return ret
raise TypeError("Failed to extract Boost.Python.ArgumentError") from ret
ArgumentError = _get_boost_exc()
RDKIT_EX = None
KT = TypeVar("KT")
VT = TypeVar("VT")
__all__ = ['generate_psf', 'generate_psf2', 'extract_ligand']
def generate_psf(
qd: str | Molecule,
ligand: None | str | Molecule = None,
rtf_file: None | PathType = None,
str_file: None | PathType = None,
) -> PSFContainer:
"""Generate a :class:`PSFContainer` instance for **qd**.
Parameters
----------
qd : :class:`str` or :class:`Molecule`
The ligand-pacifated quantum dot.
Should be supplied as either a Molecule or .xyz file.
ligand : :class:`str` or :class:`Molecule`, optional
A single ligand.
Should be supplied as either a Molecule or .xyz file.
rtf_file : :class:`str`, optional
The path+filename of the ligand's .rtf file.
Used for assigning atom types.
Alternatively, one can supply a .str file with the **str_file** argument.
str_file : :class:`str`, optional
The path+filename of the ligand's .str file.
Used for assigning atom types.
Alternatively, one can supply a .rtf file with the **rtf_file** argument.
Returns
-------
:class:`PSFContainer`
A PSFContainer instance with the new .psf file.
"""
if not isinstance(qd, Molecule):
qd = Molecule(qd)
if not isinstance(ligand, Molecule) and ligand is not None:
ligand = cast(Optional[Molecule], Molecule(ligand))
if ligand is not None:
qd_atnum = {at.atnum for at in qd}
lig_atnum = {at.atnum for at in ligand}
if not qd_atnum.issuperset(lig_atnum):
atom_symbol = ", ".join(PT.get_symbol(i) for i in sorted(lig_atnum - qd_atnum))
raise MoleculeError(f'No atoms {atom_symbol} found within {qd.get_formula()}')
# Find the start of the ligand
atnum = ligand[1].atnum
for ligand_start, at in enumerate(qd):
if at.atnum == atnum:
break
# Create an array with atomic-indice pairs defining bonds
ligand.set_atoms_id()
bonds = np.array([(b.atom1.id, b.atom2.id) for b in ligand.bonds])
bonds += ligand_start
ligand.unset_atoms_id()
# Manually add bonds to the quantum dot
ligand_len = len(ligand)
qd.delete_all_bonds()
while True:
try:
qd[bonds[0, 0]]
except IndexError:
break
else:
for j, k in bonds:
at1, at2 = qd[j], qd[k]
qd.add_bond(at1, at2)
bonds += ligand_len
# Create a nested list with residue indices
res_ar = np.arange(ligand_start, len(qd))
res_ar.shape = -1, ligand_len
res_list: List[Sequence[int]] = res_ar.tolist()
res_list.insert(0, range(ligand_start))
else:
if rtf_file is not None:
raise TypeError("`rtf_file` cannot be specified if `ligand=None`")
elif str_file is not None:
raise TypeError("`str_file` cannot be specified if `ligand=None`")
res_list = [range(len(qd))]
_assign_residues(qd, res_list)
# Create the .psf file
psf = PSFContainer()
psf.generate_bonds(qd)
psf.generate_angles(qd)
psf.generate_dihedrals(qd)
psf.generate_impropers(qd)
psf.generate_atoms(qd)
if rtf_file is not None:
overlay_rtf_file(psf, rtf_file)
if str_file is not None:
overlay_str_file(psf, str_file)
# Set the charge to zero and return
psf.charge = 0.0
return psf
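# A minimal usage sketch (file names are placeholders; writing relies on the
# PSFContainer file-container interface):
# >>> psf = generate_psf('qd.xyz', ligand='ligand.xyz', rtf_file='ligand.rtf')
# >>> psf.write('qd.psf')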
def extract_ligand(
qd: str | Molecule,
ligand_len: int,
ligand_atoms: str | Iterable[str],
) -> Molecule:
"""Extract a single ligand from **qd**.
Parameters
----------
qd : :class:`str` or :class:`Molecule`
The ligand-passivated quantum dot.
Should be supplied as either a Molecule or .xyz file.
ligand_len : :class:`int`
The number of atoms within a single ligand.
ligand_atoms : :class:`str` or :class:`Iterable<collections.abc.Iterable>` [:class:`str`]
One or multiple strings with the atomic symbols of all atoms within a single ligand.
Returns
-------
:class:`Molecule`
A single ligand Molecule.
"""
if not isinstance(qd, Molecule):
qd = Molecule(qd)
if not isinstance(ligand_atoms, set):
ligand_atoms = set(ligand_atoms) if not isinstance(ligand_atoms, str) else {ligand_atoms}
# Identify where the core ends and the ligands start
for i, at in enumerate(qd):
if at.symbol in ligand_atoms:
break
else:
raise MoleculeError(f'No atoms {tuple(ligand_atoms)} found within {qd.get_formula()}')
# Construct a ligand
j = i + ligand_len
ligand = Molecule()
ligand.atoms = [Atom(atnum=at.atnum, coords=at.coords, mol=ligand) for at in qd.atoms[i:j]]
ligand.guess_bonds()
return ligand
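# Illustrative call (the atom count and symbols below are made up):
# >>> ligand = extract_ligand('qd.xyz', ligand_len=12, ligand_atoms={'C', 'H', 'O'})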
@raise_if(RDKIT_EX)
def generate_psf2(
qd: str | Molecule,
*ligands: str | Molecule | Mol,
rtf_file: None | PathType | Iterable[PathType] = None,
str_file: None | PathType | Iterable[PathType] = None,
ret_failed_lig: bool = False,
) -> PSFContainer:
r"""Generate a :class:`PSFContainer` instance for **qd** with multiple different **ligands**.
Note
----
Requires the optional RDKit package.
Parameters
----------
qd : :class:`str` or :class:`Molecule`
The ligand-passivated quantum dot.
Should be supplied as either a Molecule or .xyz file.
\*ligands : :class:`str`, :class:`Molecule` or :class:`Chem.Mol`
One or more PLAMS/RDkit Molecules and/or SMILES strings representing ligands.
rtf_file : :class:`str` or :class:`Iterable<collections.abc.Iterable>` [:class:`str`], optional
The path+filename of the ligand's .rtf files.
Filenames should be supplied in the same order as **ligands**.
Used for assigning atom types.
Alternatively, one can supply a .str file with the **str_file** argument.
str_file : :class:`str` or :class:`Iterable<collections.abc.Iterable>` [:class:`str`], optional
The path+filename of the ligand's .str files.
Filenames should be supplied in the same order as **ligands**.
Used for assigning atom types.
Alternatively, one can supply a .rtf file with the **rtf_file** argument.
ret_failed_lig : :class:`bool`
If ``True``, return a list of all failed (potential) ligands
if the function cannot identify any ligands within a certain range.
Useful for debugging.
If ``False``, raise a :exc:`MoleculeError`.
Returns
-------
:class:`PSFContainer`
A PSFContainer instance with the new .psf file.
Raises
------
:exc:`MoleculeError`
Raised if the function fails to identify any ligands within a certain range.
If ``ret_failed_lig = True``, return a list of failed (potential) ligands instead and
issue a warning.
"""
if not isinstance(qd, Molecule):
qd = Molecule(qd)
# Create a dictionary with RDKit molecules and the number of atoms contained therein
rdmol_dict = _get_rddict(ligands)
# Find the starting atom
ligand_atoms = {at.GetAtomicNum() for rdmol in rdmol_dict for at in rdmol.GetAtoms()}
for i, at in enumerate(qd):
if at.atnum in ligand_atoms:
break
else:
raise MoleculeError(f'No atoms {tuple(PT.get_symbol(i) for i in ligand_atoms)} found '
f'within {qd.get_formula()}')
# Identify all bonds and residues
res_list = [np.arange(i)]
res_dict = {}
while True:
new, j = _get_initial_lig(qd, rdmol_dict, i)
if new is None:
break
ref0, _ = next(iter(rdmol_dict.items()))
for ref, k in rdmol_dict.items():
k = 0 if ref is ref0 else k
new = _update_lig(new, k, copy=False)
j = k or j
if _get_matches(new, ref):
qd.bonds += [Bond(atom1=qd[bond.atom1.id],
atom2=qd[bond.atom2.id],
order=bond.order, mol=qd) for bond in new.bonds]
res_list.append(np.arange(i, i+j))
res_dict[len(res_list)] = id(ref)
break
else:
continue
else:
err = (f'Failed to identify any ligands {ligands} within the range '
f'[{i}:{i + j}]')
if not ret_failed_lig:
raise MoleculeError(err)
else:
warnings.warn(err, category=MoleculeWarning)
return _return_failed_ligs(qd, rdmol_dict, i) # type: ignore
i += j
# Create the .psf file
_assign_residues(qd, res_list)
psf = PSFContainer()
psf.generate_bonds(qd)
psf.generate_angles(qd)
psf.generate_dihedrals(qd)
psf.generate_impropers(qd)
psf.generate_atoms(qd, res_dict)
if not (rtf_file is str_file is None):
_id_dict = group_by_values(res_dict.items())
id_range = (_id_dict[id(k)] for k in rdmol_dict.keys())
_overlay(psf, 'rtf', id_range, rtf_file) if rtf_file is not None else None
_overlay(psf, 'str', id_range, str_file) if str_file is not None else None
# Set the charge to zero and return
psf.charge = 0.0
return psf
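# Hypothetical multi-ligand call (requires RDKit; the SMILES strings are placeholders):
# >>> psf = generate_psf2('qd.xyz', 'CCCCCCCC[O-]', 'CC(=O)[O-]')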
def _get_initial_lig(
qd: Molecule,
rdmol_dict: Mapping[Mol, int],
i: int,
) -> Tuple[None | Molecule, int]:
"""Construct a new ligand at the begining of the :func:`generate_psf2` ``while`` loop."""
_, j = next(iter(rdmol_dict.items()))
new = Molecule()
new.atoms = [Atom(atnum=at.atnum, coords=at.coords, mol=new) for at in qd.atoms[i:i+j]]
if not new:
return None, j
elif len(new) != j: # Pad with dummy atoms
new.atoms += [Atom(atnum=0, coords=[0, 0, 0], mol=new) for _ in range(j - len(new))]
new.set_atoms_id(start=i+1)
return new, j
def _update_lig(ligand: Molecule, k: int, copy: bool = False) -> Molecule:
"""Update a ligand by removing the last **k** atoms."""
ligand = ligand.copy() if copy else ligand
atoms_del = ligand.atoms[k:] if k != 0 else []
for at in atoms_del:
ligand.delete_atom(at)
ligand.guess_bonds()
set_integer_bonds(ligand)
fix_bond_orders(ligand)
return ligand
def _return_failed_ligs(qd: Molecule, rdmol_dict: Mapping[Mol, int], i: int) -> List[Molecule]:
"""Return a list of failed ligands in case :func:`generate_psf2` fails to identify ligands."""
new, j = _get_initial_lig(qd, rdmol_dict, i)
if new is None:
raise MoleculeError
ret = []
ref0, _ = next(iter(rdmol_dict.items()))
for ref, k in rdmol_dict.items():
k = 0 if ref is ref0 else k
new = _update_lig(new, k, copy=True)
ret.append(new)
j += k
return ret
class MoleculeWarning(RuntimeWarning): # Molecule related warnings
pass
#: Map a :class:`type` object to a callable for creating :class:`rdkit.Chem.Mol` instances.
if TYPE_CHECKING or RDKIT_EX is None:
MolType = Union[Molecule, str, Mol]
MOL_MAPPING: MappingProxyType[Type[MolType], Callable[[Any], Mol]] = MappingProxyType({
str: lambda mol: to_rdmol(from_smiles(mol)),
Molecule: to_rdmol,
Mol: lambda mol: mol
})
else:
MOL_MAPPING = MappingProxyType({})
def _overlay(
psf: PSFContainer,
mode: str,
id_ranges: Iterable[Iterable[int]],
files: PathType | Iterable[PathType],
) -> None:
"""Overlay one or more .str or .rtf files."""
if not isinstance(files, abc.Iterable) or isinstance(files, (str, bytes)):
files_iter: Iterable[PathType] = (files,)
else:
files_iter = files
if mode == 'rtf':
func = overlay_rtf_file
elif mode == 'str':
func = overlay_str_file
else:
raise ValueError(f"'mode' expected either 'rtf' or 'str'; supplied value: {repr(mode)}")
for file, id_range in zip(files_iter, id_ranges):
func(psf, file, id_range=id_range)
def _items_sorted(dct: Mapping[KT, VT]) -> Iterator[Tuple[KT, VT]]:
"""Return a :meth:`dict.items()` iterator whose items are sorted by the dictionary values."""
return iter(sorted(dct.items(), key=lambda kv: kv[1], reverse=True))
@raise_if(RDKIT_EX)
def _get_matches(mol: Molecule, ref: Mol) -> bool:
"""Check if the structures of **mol** and **ref** match."""
try:
rdmol = to_rdmol(mol)
except ArgumentError:
return False
matches = rdmol.GetSubstructMatches(ref)
match_set = set(chain.from_iterable(matches))
return match_set == set(range(len(mol))) and len(match_set) == len(mol)
@raise_if(RDKIT_EX)
def _get_rddict(ligands: Iterable[str | Molecule | Mol]) -> Dict[Mol, int]:
"""Create an ordered dict with rdkit molecules and delta atom counts for :func:`generate_psf`.""" # noqa
tmp_dct = {(lig, MOL_MAPPING[type(lig)](lig)): 0 for lig in ligands}
no_bonds = []
for lig, rdmol in tmp_dct:
tmp_dct[lig, rdmol] = len(rdmol.GetAtoms())
if not len(rdmol.GetBonds()):
no_bonds.append(lig)
if no_bonds:
warnings.warn(
f'Failed to identify any bonds in the following ligands: {no_bonds!r}',
category=MoleculeWarning, stacklevel=2,
)
return {k: v for (_, k), v in _items_sorted(tmp_dct)}
def set_integer_bonds(self) -> None:
"""Convert non-integer bond orders into integers.
For example, bond orders of aromatic systems are no longer set to the non-integer
value of ``1.5``, instead adopting bond orders of ``1`` and ``2``.
The implemented function walks a set of graphs constructed from all non-integer bonds,
converting the orders of aforementioned bonds to integers by alternating calls to
:func:`math.ceil` and :func:`math.floor`.
The implication herein is that both :math:`i` and :math:`i+1` are considered valid
(integer) values for any bond order within the :math:`(i, i+1)` interval.
Floats which can be represented exactly as an integer, *e.g.* :math:`1.0`,
are herein treated as integers.
Can be used for sanitizing any Molecules passed to the
:mod:`rdkit<scm.plams.interfaces.molecule.rdkit>` module,
as its functions are generally unable to handle Molecules with non-integer bond orders.
.. code:: python
>>> from scm.plams import Molecule
>>> benzene = Molecule(...)
>>> print(benzene)
Atoms:
1 C 1.193860 -0.689276 0.000000
2 C 1.193860 0.689276 0.000000
3 C 0.000000 1.378551 0.000000
4 C -1.193860 0.689276 0.000000
5 C -1.193860 -0.689276 0.000000
6 C -0.000000 -1.378551 0.000000
7 H 2.132911 -1.231437 -0.000000
8 H 2.132911 1.231437 -0.000000
9 H 0.000000 2.462874 -0.000000
10 H -2.132911 1.231437 -0.000000
11 H -2.132911 -1.231437 -0.000000
12 H -0.000000 -2.462874 -0.000000
Bonds:
(3)--1.5--(4)
(5)--1.5--(6)
(1)--1.5--(6)
(2)--1.5--(3)
(4)--1.5--(5)
(1)--1.5--(2)
(3)--1.0--(9)
(6)--1.0--(12)
(5)--1.0--(11)
(4)--1.0--(10)
(2)--1.0--(8)
(1)--1.0--(7)
>>> benzene.set_integer_bonds()
>>> print(benzene)
Atoms:
1 C 1.193860 -0.689276 0.000000
2 C 1.193860 0.689276 0.000000
3 C 0.000000 1.378551 0.000000
4 C -1.193860 0.689276 0.000000
5 C -1.193860 -0.689276 0.000000
6 C -0.000000 -1.378551 0.000000
7 H 2.132911 -1.231437 -0.000000
8 H 2.132911 1.231437 -0.000000
9 H 0.000000 2.462874 -0.000000
10 H -2.132911 1.231437 -0.000000
11 H -2.132911 -1.231437 -0.000000
12 H -0.000000 -2.462874 -0.000000
Bonds:
(3)--1.0--(4)
(5)--1.0--(6)
(1)--2.0--(6)
(2)--2.0--(3)
(4)--2.0--(5)
(1)--1.0--(2)
(3)--1.0--(9)
(6)--1.0--(12)
(5)--1.0--(11)
(4)--1.0--(10)
(2)--1.0--(8)
(1)--1.0--(7)
"""
ceil = math.ceil
floor = math.floor
func_invert: Dict[Callable[[float], float], Callable[[float], float]] = {
ceil: floor,
floor: ceil,
}
def dfs(atom: Atom, func: Callable[[float], float]) -> None:
"""Depth-first search algorithm for integer-ifying the bond orders."""
for b2 in atom.bonds:
if b2._visited:
continue
b2._visited = True
b2.order = func(b2.order) # func = ``math.ceil()`` or ``math.floor()``
del bond_dict[b2]
atom_new = b2.atom1 if b2.atom1 is not atom else b2.atom2
dfs(atom_new, func=func_invert[func])
# Mark all non-integer bonds; floats which can be represented exactly
# by an integer (e.g. 1.0 and 2.0) are herein treated as integers
bond_dict: Dict[Bond, None] = {} # An improvised OrderedSet (as it does not exist)
for bond in self.bonds:
if hasattr(bond.order, 'is_integer') and not bond.order.is_integer():
bond._visited = False
bond_dict[bond] = None
else:
bond._visited = True
while bond_dict:
b1, _ = bond_dict.popitem()
order = b1.order
# Start with either ``math.ceil()`` if the ceiling is closer than the floor;
# start with ``math.floor()`` otherwise
delta_ceil, delta_floor = ceil(order) - order, floor(order) - order
func = ceil if abs(delta_ceil) < abs(delta_floor) else floor
b1.order = func(order)
b1._visited = True
dfs(b1.atom1, func=func_invert[func])
dfs(b1.atom2, func=func_invert[func])
for bond in self.bonds:
del bond._visited
/Firmant-0.2.3a1.tar.gz/Firmant-0.2.3a1/firmant/utils/exceptions.py
import os
import tempfile
import traceback
def log_uncaught_exceptions(func, log, message, save_traceback=False):
'''Catch and log exceptions of ``func``.
Returns True if the function succeeds without throwing an exception.
`message` will be written to `log` if an exception is thrown, and False is
returned. If save_traceback is true, the traceback will be saved to a
temporary file.
In the normal case, the `func` will be called and True will be returned.
.. doctest::
:hide:
>>> log = Mock('log')
.. doctest::
>>> def wont_raise_error():
... print 'Success!'
>>> log_uncaught_exceptions(wont_raise_error, log, 'error!')
Success!
True
When `func` raises an error, the error will be caught, and `message` will be
written to log as an error.
.. doctest::
>>> def raises_error():
... raise RuntimeError('Intentionally thrown')
>>> log_uncaught_exceptions(raises_error, log, 'error!')
Called log.error('error!')
Called log.info('traceback not saved')
False
If `save_traceback` is True, then a temporary file created with
`tempfile.mkstemp` will be used to store the traceback.
.. doctest::
:hide:
>>> import tempfile
>>> f, path = tempfile.mkstemp()
>>> tempfile.mkstemp = Mock('mkstemp')
>>> tempfile.mkstemp.mock_returns = (f, path)
.. doctest::
>>> log_uncaught_exceptions(raises_error, log, 'error!', True) #doctest: +ELLIPSIS
Called log.error('error!')
Called mkstemp(prefix='firmant', text=True)
Called log.error('...traceback saved to /...')
False
.. doctest::
:hide:
>>> os.unlink(path)
>>> from minimock import restore
>>> restore()
If an exception is thrown while saving to the file, it will warn about
the potential for infinite recursion and stop:
.. doctest::
:hide:
>>> tempfile.mkstemp = Mock('mkstemp')
>>> tempfile.mkstemp.mock_raises = ValueError
.. doctest::
>>> log_uncaught_exceptions(raises_error, log, 'error!', True)
Called log.error('error!')
Called mkstemp(prefix='firmant', text=True)
Called log.error("it's turtles all the way down")
False
.. doctest::
:hide:
>>> from minimock import restore
>>> restore()
'''
try:
func()
# pylint: disable-msg=W0702
except:
log.error(message)
if save_traceback:
try:
tmp, path = tempfile.mkstemp(prefix='firmant', text=True)
tmp = os.fdopen(tmp, 'w+')
traceback.print_exc(file=tmp)
tmp.flush()
tmp.close()
log.error(_('traceback saved to %s') % path)
except:
log.error(_("it's turtles all the way down"))
else:
log.info(_('traceback not saved'))
else:
return True
return False
/Booktype-1.5.tar.gz/Booktype-1.5/lib/booki/site_static/js/tiny_mce/plugins/table/editor_plugin_src.js
(function(tinymce) {
var each = tinymce.each;
// Checks if the selection/caret is at the start of the specified block element
function isAtStart(rng, par) {
var doc = par.ownerDocument, rng2 = doc.createRange(), elm;
rng2.setStartBefore(par);
rng2.setEnd(rng.endContainer, rng.endOffset);
elm = doc.createElement('body');
elm.appendChild(rng2.cloneContents());
// Check for text characters of other elements that should be treated as content
return elm.innerHTML.replace(/<(br|img|object|embed|input|textarea)[^>]*>/gi, '-').replace(/<[^>]+>/g, '').length == 0;
};
function getSpanVal(td, name) {
return parseInt(td.getAttribute(name) || 1);
}
/**
* Table Grid class.
*/
function TableGrid(table, dom, selection) {
var grid, startPos, endPos, selectedCell;
buildGrid();
selectedCell = dom.getParent(selection.getStart(), 'th,td');
if (selectedCell) {
startPos = getPos(selectedCell);
endPos = findEndPos();
selectedCell = getCell(startPos.x, startPos.y);
}
function cloneNode(node, children) {
node = node.cloneNode(children);
node.removeAttribute('id');
return node;
}
function buildGrid() {
var startY = 0;
grid = [];
each(['thead', 'tbody', 'tfoot'], function(part) {
var rows = dom.select('> ' + part + ' tr', table);
each(rows, function(tr, y) {
y += startY;
each(dom.select('> td, > th', tr), function(td, x) {
var x2, y2, rowspan, colspan;
// Skip over existing cells produced by rowspan
if (grid[y]) {
while (grid[y][x])
x++;
}
// Get col/rowspan from cell
rowspan = getSpanVal(td, 'rowspan');
colspan = getSpanVal(td, 'colspan');
// Fill out rowspan/colspan right and down
for (y2 = y; y2 < y + rowspan; y2++) {
if (!grid[y2])
grid[y2] = [];
for (x2 = x; x2 < x + colspan; x2++) {
grid[y2][x2] = {
part : part,
real : y2 == y && x2 == x,
elm : td,
rowspan : rowspan,
colspan : colspan
};
}
}
});
});
startY += rows.length;
});
};
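// Illustrative grid layout (a sketch): for
//   <tr><td rowspan="2">A</td><td>B</td></tr><tr><td>C</td></tr>
// buildGrid() produces [[A, B], [A, C]], where the second A entry wraps the
// same <td> with real == false, so row/col spans can be walked as a dense matrix.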
function getCell(x, y) {
var row;
row = grid[y];
if (row)
return row[x];
};
function setSpanVal(td, name, val) {
if (td) {
val = parseInt(val);
if (val === 1)
td.removeAttribute(name, 1);
else
td.setAttribute(name, val, 1);
}
}
function isCellSelected(cell) {
return cell && (dom.hasClass(cell.elm, 'mceSelected') || cell == selectedCell);
};
function getSelectedRows() {
var rows = [];
each(table.rows, function(row) {
each(row.cells, function(cell) {
if (dom.hasClass(cell, 'mceSelected') || cell == selectedCell.elm) {
rows.push(row);
return false;
}
});
});
return rows;
};
function deleteTable() {
var rng = dom.createRng();
rng.setStartAfter(table);
rng.setEndAfter(table);
selection.setRng(rng);
dom.remove(table);
};
function cloneCell(cell) {
var formatNode;
// Clone formats
tinymce.walk(cell, function(node) {
var curNode;
if (node.nodeType == 3) {
each(dom.getParents(node.parentNode, null, cell).reverse(), function(node) {
node = cloneNode(node, false);
if (!formatNode)
formatNode = curNode = node;
else if (curNode)
curNode.appendChild(node);
curNode = node;
});
// Add something to the inner node
if (curNode)
curNode.innerHTML = tinymce.isIE ? ' ' : '<br data-mce-bogus="1" />';
return false;
}
}, 'childNodes');
cell = cloneNode(cell, false);
setSpanVal(cell, 'rowSpan', 1);
setSpanVal(cell, 'colSpan', 1);
if (formatNode) {
cell.appendChild(formatNode);
} else {
if (!tinymce.isIE)
cell.innerHTML = '<br data-mce-bogus="1" />';
}
return cell;
};
function cleanup() {
var rng = dom.createRng();
// Empty rows
each(dom.select('tr', table), function(tr) {
if (tr.cells.length == 0)
dom.remove(tr);
});
// Empty table
if (dom.select('tr', table).length == 0) {
rng.setStartAfter(table);
rng.setEndAfter(table);
selection.setRng(rng);
dom.remove(table);
return;
}
// Empty header/body/footer
each(dom.select('thead,tbody,tfoot', table), function(part) {
if (part.rows.length == 0)
dom.remove(part);
});
// Restore selection to start position if it still exists
buildGrid();
// Restore the selection to the closest table position
row = grid[Math.min(grid.length - 1, startPos.y)];
if (row) {
selection.select(row[Math.min(row.length - 1, startPos.x)].elm, true);
selection.collapse(true);
}
};
function fillLeftDown(x, y, rows, cols) {
var tr, x2, r, c, cell;
tr = grid[y][x].elm.parentNode;
for (r = 1; r <= rows; r++) {
tr = dom.getNext(tr, 'tr');
if (tr) {
// Loop left to find real cell
for (x2 = x; x2 >= 0; x2--) {
cell = grid[y + r][x2].elm;
if (cell.parentNode == tr) {
// Append clones after
for (c = 1; c <= cols; c++)
dom.insertAfter(cloneCell(cell), cell);
break;
}
}
if (x2 == -1) {
// Insert nodes before first cell
for (c = 1; c <= cols; c++)
tr.insertBefore(cloneCell(tr.cells[0]), tr.cells[0]);
}
}
}
};
function split() {
each(grid, function(row, y) {
each(row, function(cell, x) {
var colSpan, rowSpan, newCell, i;
if (isCellSelected(cell)) {
cell = cell.elm;
colSpan = getSpanVal(cell, 'colspan');
rowSpan = getSpanVal(cell, 'rowspan');
if (colSpan > 1 || rowSpan > 1) {
setSpanVal(cell, 'rowSpan', 1);
setSpanVal(cell, 'colSpan', 1);
// Insert cells right
for (i = 0; i < colSpan - 1; i++)
dom.insertAfter(cloneCell(cell), cell);
fillLeftDown(x, y, rowSpan - 1, colSpan);
}
}
});
});
};
function merge(cell, cols, rows) {
var startX, startY, endX, endY, x, y, startCell, endCell, cell, children, count, pos;
// Use specified cell and cols/rows
if (cell) {
pos = getPos(cell);
startX = pos.x;
startY = pos.y;
endX = startX + (cols - 1);
endY = startY + (rows - 1);
} else {
// Use selection
startX = startPos.x;
startY = startPos.y;
endX = endPos.x;
endY = endPos.y;
}
// Find start/end cells
startCell = getCell(startX, startY);
endCell = getCell(endX, endY);
// Check if the cells exists and if they are of the same part for example tbody = tbody
if (startCell && endCell && startCell.part == endCell.part) {
// Split and rebuild grid
split();
buildGrid();
// Set row/col span to start cell
startCell = getCell(startX, startY).elm;
setSpanVal(startCell, 'colSpan', (endX - startX) + 1);
setSpanVal(startCell, 'rowSpan', (endY - startY) + 1);
// Remove other cells and add it's contents to the start cell
for (y = startY; y <= endY; y++) {
for (x = startX; x <= endX; x++) {
if (!grid[y] || !grid[y][x])
continue;
cell = grid[y][x].elm;
if (cell != startCell) {
// Move children to startCell
children = tinymce.grep(cell.childNodes);
each(children, function(node) {
startCell.appendChild(node);
});
// Remove bogus nodes if there is children in the target cell
if (children.length) {
children = tinymce.grep(startCell.childNodes);
count = 0;
each(children, function(node) {
if (node.nodeName == 'BR' && dom.getAttrib(node, 'data-mce-bogus') && count++ < children.length - 1)
startCell.removeChild(node);
});
}
// Remove cell
dom.remove(cell);
}
}
}
// Remove empty rows etc and restore caret location
cleanup();
}
};
function insertRow(before) {
var posY, cell, lastCell, x, rowElm, newRow, newCell, otherCell, rowSpan;
// Find first/last row
each(grid, function(row, y) {
each(row, function(cell, x) {
if (isCellSelected(cell)) {
cell = cell.elm;
rowElm = cell.parentNode;
newRow = cloneNode(rowElm, false);
posY = y;
if (before)
return false;
}
});
if (before)
return !posY;
});
for (x = 0; x < grid[0].length; x++) {
// Cell not found could be because of an invalid table structure
if (!grid[posY][x])
continue;
cell = grid[posY][x].elm;
if (cell != lastCell) {
if (!before) {
rowSpan = getSpanVal(cell, 'rowspan');
if (rowSpan > 1) {
setSpanVal(cell, 'rowSpan', rowSpan + 1);
continue;
}
} else {
// Check if cell above can be expanded
if (posY > 0 && grid[posY - 1][x]) {
otherCell = grid[posY - 1][x].elm;
rowSpan = getSpanVal(otherCell, 'rowSpan');
if (rowSpan > 1) {
setSpanVal(otherCell, 'rowSpan', rowSpan + 1);
continue;
}
}
}
// Insert new cell into new row
newCell = cloneCell(cell);
setSpanVal(newCell, 'colSpan', cell.colSpan);
newRow.appendChild(newCell);
lastCell = cell;
}
}
if (newRow.hasChildNodes()) {
if (!before)
dom.insertAfter(newRow, rowElm);
else
rowElm.parentNode.insertBefore(newRow, rowElm);
}
};
function insertCol(before) {
var posX, lastCell;
// Find first/last column
each(grid, function(row, y) {
each(row, function(cell, x) {
if (isCellSelected(cell)) {
posX = x;
if (before)
return false;
}
});
if (before)
return !posX;
});
each(grid, function(row, y) {
var cell, rowSpan, colSpan;
if (!row[posX])
return;
cell = row[posX].elm;
if (cell != lastCell) {
colSpan = getSpanVal(cell, 'colspan');
rowSpan = getSpanVal(cell, 'rowspan');
if (colSpan == 1) {
if (!before) {
dom.insertAfter(cloneCell(cell), cell);
fillLeftDown(posX, y, rowSpan - 1, colSpan);
} else {
cell.parentNode.insertBefore(cloneCell(cell), cell);
fillLeftDown(posX, y, rowSpan - 1, colSpan);
}
} else
setSpanVal(cell, 'colSpan', cell.colSpan + 1);
lastCell = cell;
}
});
};
function deleteCols() {
var cols = [];
// Get selected column indexes
each(grid, function(row, y) {
each(row, function(cell, x) {
if (isCellSelected(cell) && tinymce.inArray(cols, x) === -1) {
each(grid, function(row) {
var cell = row[x].elm, colSpan;
colSpan = getSpanVal(cell, 'colSpan');
if (colSpan > 1)
setSpanVal(cell, 'colSpan', colSpan - 1);
else
dom.remove(cell);
});
cols.push(x);
}
});
});
cleanup();
};
function deleteRows() {
var rows;
function deleteRow(tr) {
var nextTr, pos, lastCell;
nextTr = dom.getNext(tr, 'tr');
// Move down row spanned cells
each(tr.cells, function(cell) {
var rowSpan = getSpanVal(cell, 'rowSpan');
if (rowSpan > 1) {
setSpanVal(cell, 'rowSpan', rowSpan - 1);
pos = getPos(cell);
fillLeftDown(pos.x, pos.y, 1, 1);
}
});
// Delete cells
pos = getPos(tr.cells[0]);
each(grid[pos.y], function(cell) {
var rowSpan;
cell = cell.elm;
if (cell != lastCell) {
rowSpan = getSpanVal(cell, 'rowSpan');
if (rowSpan <= 1)
dom.remove(cell);
else
setSpanVal(cell, 'rowSpan', rowSpan - 1);
lastCell = cell;
}
});
};
// Get selected rows and move selection out of scope
rows = getSelectedRows();
// Delete all selected rows
each(rows.reverse(), function(tr) {
deleteRow(tr);
});
cleanup();
};
function cutRows() {
var rows = getSelectedRows();
dom.remove(rows);
cleanup();
return rows;
};
function copyRows() {
var rows = getSelectedRows();
each(rows, function(row, i) {
rows[i] = cloneNode(row, true);
});
return rows;
};
function pasteRows(rows, before) {
var selectedRows = getSelectedRows(),
targetRow = selectedRows[before ? 0 : selectedRows.length - 1],
targetCellCount = targetRow.cells.length;
// Calc target cell count
each(grid, function(row) {
var match;
targetCellCount = 0;
each(row, function(cell, x) {
if (cell.real)
targetCellCount += cell.colspan;
if (cell.elm.parentNode == targetRow)
match = 1;
});
if (match)
return false;
});
if (!before)
rows.reverse();
each(rows, function(row) {
var cellCount = row.cells.length, cell, i;
// Remove col/rowspans
for (i = 0; i < cellCount; i++) {
cell = row.cells[i];
setSpanVal(cell, 'colSpan', 1);
setSpanVal(cell, 'rowSpan', 1);
}
// Needs more cells
for (i = cellCount; i < targetCellCount; i++)
row.appendChild(cloneCell(row.cells[cellCount - 1]));
// Needs less cells
for (i = targetCellCount; i < cellCount; i++)
dom.remove(row.cells[i]);
// Add before/after
if (before)
targetRow.parentNode.insertBefore(row, targetRow);
else
dom.insertAfter(row, targetRow);
});
};
function getPos(target) {
var pos;
each(grid, function(row, y) {
each(row, function(cell, x) {
if (cell.elm == target) {
pos = {x : x, y : y};
return false;
}
});
return !pos;
});
return pos;
};
function setStartCell(cell) {
startPos = getPos(cell);
};
function findEndPos() {
var pos, maxX, maxY;
maxX = maxY = 0;
each(grid, function(row, y) {
each(row, function(cell, x) {
var colSpan, rowSpan;
if (isCellSelected(cell)) {
cell = grid[y][x];
if (x > maxX)
maxX = x;
if (y > maxY)
maxY = y;
if (cell.real) {
colSpan = cell.colspan - 1;
rowSpan = cell.rowspan - 1;
if (colSpan) {
if (x + colSpan > maxX)
maxX = x + colSpan;
}
if (rowSpan) {
if (y + rowSpan > maxY)
maxY = y + rowSpan;
}
}
}
});
});
return {x : maxX, y : maxY};
};
function setEndCell(cell) {
var startX, startY, endX, endY, maxX, maxY, colSpan, rowSpan;
endPos = getPos(cell);
if (startPos && endPos) {
// Get start/end positions
startX = Math.min(startPos.x, endPos.x);
startY = Math.min(startPos.y, endPos.y);
endX = Math.max(startPos.x, endPos.x);
endY = Math.max(startPos.y, endPos.y);
// Expand end position to include spans
maxX = endX;
maxY = endY;
// Expand startX
for (y = startY; y <= maxY; y++) {
cell = grid[y][startX];
if (!cell.real) {
if (startX - (cell.colspan - 1) < startX)
startX -= cell.colspan - 1;
}
}
// Expand startY
for (x = startX; x <= maxX; x++) {
cell = grid[startY][x];
if (!cell.real) {
if (startY - (cell.rowspan - 1) < startY)
startY -= cell.rowspan - 1;
}
}
// Find max X, Y
for (y = startY; y <= endY; y++) {
for (x = startX; x <= endX; x++) {
cell = grid[y][x];
if (cell.real) {
colSpan = cell.colspan - 1;
rowSpan = cell.rowspan - 1;
if (colSpan) {
if (x + colSpan > maxX)
maxX = x + colSpan;
}
if (rowSpan) {
if (y + rowSpan > maxY)
maxY = y + rowSpan;
}
}
}
}
// Remove current selection
dom.removeClass(dom.select('td.mceSelected,th.mceSelected'), 'mceSelected');
// Add new selection
for (y = startY; y <= maxY; y++) {
for (x = startX; x <= maxX; x++) {
if (grid[y][x])
dom.addClass(grid[y][x].elm, 'mceSelected');
}
}
}
};
// Expose to public
tinymce.extend(this, {
deleteTable : deleteTable,
split : split,
merge : merge,
insertRow : insertRow,
insertCol : insertCol,
deleteCols : deleteCols,
deleteRows : deleteRows,
cutRows : cutRows,
copyRows : copyRows,
pasteRows : pasteRows,
getPos : getPos,
setStartCell : setStartCell,
setEndCell : setEndCell
});
};
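	// Editorial sketch of the TableGrid API exposed above (constructed as in
	// createTableGrid below):
	//   var grid = new TableGrid(tableElm, ed.dom, ed.selection);
	//   grid.setStartCell(cellA);
	//   grid.setEndCell(cellB); // marks the block with 'mceSelected'
	//   grid.merge();           // merges the marked block into a single cell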
tinymce.create('tinymce.plugins.TablePlugin', {
init : function(ed, url) {
var winMan, clipboardRows, hasCellSelection = true; // Might be selected cells on reload
function createTableGrid(node) {
var selection = ed.selection, tblElm = ed.dom.getParent(node || selection.getNode(), 'table');
if (tblElm)
return new TableGrid(tblElm, ed.dom, selection);
};
function cleanup() {
// Restore selection possibilities
ed.getBody().style.webkitUserSelect = '';
if (hasCellSelection) {
ed.dom.removeClass(ed.dom.select('td.mceSelected,th.mceSelected'), 'mceSelected');
hasCellSelection = false;
}
};
// Register buttons
each([
['table', 'table.desc', 'mceInsertTable', true],
['delete_table', 'table.del', 'mceTableDelete'],
['delete_col', 'table.delete_col_desc', 'mceTableDeleteCol'],
['delete_row', 'table.delete_row_desc', 'mceTableDeleteRow'],
['col_after', 'table.col_after_desc', 'mceTableInsertColAfter'],
['col_before', 'table.col_before_desc', 'mceTableInsertColBefore'],
['row_after', 'table.row_after_desc', 'mceTableInsertRowAfter'],
['row_before', 'table.row_before_desc', 'mceTableInsertRowBefore'],
['row_props', 'table.row_desc', 'mceTableRowProps', true],
['cell_props', 'table.cell_desc', 'mceTableCellProps', true],
['split_cells', 'table.split_cells_desc', 'mceTableSplitCells', true],
['merge_cells', 'table.merge_cells_desc', 'mceTableMergeCells', true]
], function(c) {
ed.addButton(c[0], {title : c[1], cmd : c[2], ui : c[3]});
});
		// Select whole table if a table border is clicked
if (!tinymce.isIE) {
ed.onClick.add(function(ed, e) {
e = e.target;
if (e.nodeName === 'TABLE') {
ed.selection.select(e);
ed.nodeChanged();
}
});
}
ed.onPreProcess.add(function(ed, args) {
var nodes, i, node, dom = ed.dom, value;
nodes = dom.select('table', args.node);
i = nodes.length;
while (i--) {
node = nodes[i];
dom.setAttrib(node, 'data-mce-style', '');
if ((value = dom.getAttrib(node, 'width'))) {
dom.setStyle(node, 'width', value);
dom.setAttrib(node, 'width', '');
}
if ((value = dom.getAttrib(node, 'height'))) {
dom.setStyle(node, 'height', value);
dom.setAttrib(node, 'height', '');
}
}
});
// Handle node change updates
ed.onNodeChange.add(function(ed, cm, n) {
var p;
n = ed.selection.getStart();
p = ed.dom.getParent(n, 'td,th,caption');
cm.setActive('table', n.nodeName === 'TABLE' || !!p);
// Disable table tools if we are in caption
if (p && p.nodeName === 'CAPTION')
p = 0;
cm.setDisabled('delete_table', !p);
cm.setDisabled('delete_col', !p);
cm.setDisabled('delete_row', !p);
cm.setDisabled('col_after', !p);
cm.setDisabled('col_before', !p);
cm.setDisabled('row_after', !p);
cm.setDisabled('row_before', !p);
cm.setDisabled('row_props', !p);
cm.setDisabled('cell_props', !p);
cm.setDisabled('split_cells', !p);
cm.setDisabled('merge_cells', !p);
});
ed.onInit.add(function(ed) {
var startTable, startCell, dom = ed.dom, tableGrid;
winMan = ed.windowManager;
// Add cell selection logic
ed.onMouseDown.add(function(ed, e) {
if (e.button != 2) {
cleanup();
startCell = dom.getParent(e.target, 'td,th');
startTable = dom.getParent(startCell, 'table');
}
});
dom.bind(ed.getDoc(), 'mouseover', function(e) {
var sel, table, target = e.target;
if (startCell && (tableGrid || target != startCell) && (target.nodeName == 'TD' || target.nodeName == 'TH')) {
table = dom.getParent(target, 'table');
if (table == startTable) {
if (!tableGrid) {
tableGrid = createTableGrid(table);
tableGrid.setStartCell(startCell);
ed.getBody().style.webkitUserSelect = 'none';
}
tableGrid.setEndCell(target);
hasCellSelection = true;
}
// Remove current selection
sel = ed.selection.getSel();
try {
if (sel.removeAllRanges)
sel.removeAllRanges();
else
sel.empty();
} catch (ex) {
// IE9 might throw errors here
}
e.preventDefault();
}
});
ed.onMouseUp.add(function(ed, e) {
var rng, sel = ed.selection, selectedCells, nativeSel = sel.getSel(), walker, node, lastNode, endNode;
// Move selection to startCell
if (startCell) {
if (tableGrid)
ed.getBody().style.webkitUserSelect = '';
function setPoint(node, start) {
var walker = new tinymce.dom.TreeWalker(node, node);
do {
// Text node
if (node.nodeType == 3 && tinymce.trim(node.nodeValue).length != 0) {
if (start)
rng.setStart(node, 0);
else
rng.setEnd(node, node.nodeValue.length);
return;
}
// BR element
if (node.nodeName == 'BR') {
if (start)
rng.setStartBefore(node);
else
rng.setEndBefore(node);
return;
}
} while (node = (start ? walker.next() : walker.prev()));
}
				// Try to expand the text selection as much as we can; only Gecko supports cell selection
selectedCells = dom.select('td.mceSelected,th.mceSelected');
if (selectedCells.length > 0) {
rng = dom.createRng();
node = selectedCells[0];
endNode = selectedCells[selectedCells.length - 1];
rng.setStartBefore(node);
rng.setEndAfter(node);
setPoint(node, 1);
walker = new tinymce.dom.TreeWalker(node, dom.getParent(selectedCells[0], 'table'));
do {
if (node.nodeName == 'TD' || node.nodeName == 'TH') {
if (!dom.hasClass(node, 'mceSelected'))
break;
lastNode = node;
}
} while (node = walker.next());
setPoint(lastNode);
sel.setRng(rng);
}
ed.nodeChanged();
startCell = tableGrid = startTable = null;
}
});
ed.onKeyUp.add(function(ed, e) {
cleanup();
});
ed.onKeyDown.add(function (ed, e) {
fixTableCellSelection(ed);
});
ed.onMouseDown.add(function (ed, e) {
if (e.button != 2) {
fixTableCellSelection(ed);
}
});
function tableCellSelected(ed, rng, n, currentCell) {
// The decision of when a table cell is selected is somewhat involved. The fact that this code is
// required is actually a pointer to the root cause of this bug. A cell is selected when the start
// and end offsets are 0, the start container is a text, and the selection node is either a TR (most cases)
// or the parent of the table (in the case of the selection containing the last cell of a table).
var TEXT_NODE = 3, table = ed.dom.getParent(rng.startContainer, 'TABLE'),
tableParent, allOfCellSelected, tableCellSelection;
if (table)
tableParent = table.parentNode;
				allOfCellSelected = rng.startContainer.nodeType == TEXT_NODE &&
					rng.startOffset == 0 &&
					rng.endOffset == 0 &&
					currentCell &&
					(n.nodeName == "TR" || n == tableParent);
				tableCellSelection = (n.nodeName == "TD" || n.nodeName == "TH") && !currentCell;
				return allOfCellSelected || tableCellSelection;
}
// this nasty hack is here to work around some WebKit selection bugs.
function fixTableCellSelection(ed) {
if (!tinymce.isWebKit)
return;
var rng = ed.selection.getRng();
var n = ed.selection.getNode();
var currentCell = ed.dom.getParent(rng.startContainer, 'TD,TH');
if (!tableCellSelected(ed, rng, n, currentCell))
return;
				if (!currentCell) {
					currentCell = n;
				}
// Get the very last node inside the table cell
var end = currentCell.lastChild;
while (end.lastChild)
end = end.lastChild;
// Select the entire table cell. Nothing outside of the table cell should be selected.
rng.setEnd(end, end.nodeValue.length);
ed.selection.setRng(rng);
}
			ed.plugins.table.fixTableCellSelection = fixTableCellSelection;
// Add context menu
if (ed && ed.plugins.contextmenu) {
ed.plugins.contextmenu.onContextMenu.add(function(th, m, e) {
var sm, se = ed.selection, el = se.getNode() || ed.getBody();
if (ed.dom.getParent(e, 'td') || ed.dom.getParent(e, 'th') || ed.dom.select('td.mceSelected,th.mceSelected').length) {
m.removeAll();
if (el.nodeName == 'A' && !ed.dom.getAttrib(el, 'name')) {
m.add({title : 'advanced.link_desc', icon : 'link', cmd : ed.plugins.advlink ? 'mceAdvLink' : 'mceLink', ui : true});
m.add({title : 'advanced.unlink_desc', icon : 'unlink', cmd : 'UnLink'});
m.addSeparator();
}
if (el.nodeName == 'IMG' && el.className.indexOf('mceItem') == -1) {
m.add({title : 'advanced.image_desc', icon : 'image', cmd : ed.plugins.advimage ? 'mceAdvImage' : 'mceImage', ui : true});
m.addSeparator();
}
m.add({title : 'table.desc', icon : 'table', cmd : 'mceInsertTable', value : {action : 'insert'}});
m.add({title : 'table.props_desc', icon : 'table_props', cmd : 'mceInsertTable'});
m.add({title : 'table.del', icon : 'delete_table', cmd : 'mceTableDelete'});
m.addSeparator();
// Cell menu
sm = m.addMenu({title : 'table.cell'});
sm.add({title : 'table.cell_desc', icon : 'cell_props', cmd : 'mceTableCellProps'});
sm.add({title : 'table.split_cells_desc', icon : 'split_cells', cmd : 'mceTableSplitCells'});
sm.add({title : 'table.merge_cells_desc', icon : 'merge_cells', cmd : 'mceTableMergeCells'});
// Row menu
sm = m.addMenu({title : 'table.row'});
sm.add({title : 'table.row_desc', icon : 'row_props', cmd : 'mceTableRowProps'});
sm.add({title : 'table.row_before_desc', icon : 'row_before', cmd : 'mceTableInsertRowBefore'});
sm.add({title : 'table.row_after_desc', icon : 'row_after', cmd : 'mceTableInsertRowAfter'});
sm.add({title : 'table.delete_row_desc', icon : 'delete_row', cmd : 'mceTableDeleteRow'});
sm.addSeparator();
sm.add({title : 'table.cut_row_desc', icon : 'cut', cmd : 'mceTableCutRow'});
sm.add({title : 'table.copy_row_desc', icon : 'copy', cmd : 'mceTableCopyRow'});
sm.add({title : 'table.paste_row_before_desc', icon : 'paste', cmd : 'mceTablePasteRowBefore'}).setDisabled(!clipboardRows);
sm.add({title : 'table.paste_row_after_desc', icon : 'paste', cmd : 'mceTablePasteRowAfter'}).setDisabled(!clipboardRows);
// Column menu
sm = m.addMenu({title : 'table.col'});
sm.add({title : 'table.col_before_desc', icon : 'col_before', cmd : 'mceTableInsertColBefore'});
sm.add({title : 'table.col_after_desc', icon : 'col_after', cmd : 'mceTableInsertColAfter'});
sm.add({title : 'table.delete_col_desc', icon : 'delete_col', cmd : 'mceTableDeleteCol'});
} else
m.add({title : 'table.desc', icon : 'table', cmd : 'mceInsertTable'});
});
}
// Fix to allow navigating up and down in a table in WebKit browsers.
if (tinymce.isWebKit) {
function moveSelection(ed, e) {
var VK = tinymce.VK;
var key = e.keyCode;
function handle(upBool, sourceNode, event) {
var siblingDirection = upBool ? 'previousSibling' : 'nextSibling';
var currentRow = ed.dom.getParent(sourceNode, 'tr');
var siblingRow = currentRow[siblingDirection];
if (siblingRow) {
moveCursorToRow(ed, sourceNode, siblingRow, upBool);
tinymce.dom.Event.cancel(event);
return true;
} else {
var tableNode = ed.dom.getParent(currentRow, 'table');
var middleNode = currentRow.parentNode;
var parentNodeName = middleNode.nodeName.toLowerCase();
if (parentNodeName === 'tbody' || parentNodeName === (upBool ? 'tfoot' : 'thead')) {
var targetParent = getTargetParent(upBool, tableNode, middleNode, 'tbody');
if (targetParent !== null) {
return moveToRowInTarget(upBool, targetParent, sourceNode, event);
}
}
return escapeTable(upBool, currentRow, siblingDirection, tableNode, event);
}
}
function getTargetParent(upBool, topNode, secondNode, nodeName) {
var tbodies = ed.dom.select('>' + nodeName, topNode);
var position = tbodies.indexOf(secondNode);
if (upBool && position === 0 || !upBool && position === tbodies.length - 1) {
return getFirstHeadOrFoot(upBool, topNode);
} else if (position === -1) {
var topOrBottom = secondNode.tagName.toLowerCase() === 'thead' ? 0 : tbodies.length - 1;
return tbodies[topOrBottom];
} else {
return tbodies[position + (upBool ? -1 : 1)];
}
}
function getFirstHeadOrFoot(upBool, parent) {
var tagName = upBool ? 'thead' : 'tfoot';
var headOrFoot = ed.dom.select('>' + tagName, parent);
return headOrFoot.length !== 0 ? headOrFoot[0] : null;
}
function moveToRowInTarget(upBool, targetParent, sourceNode, event) {
var targetRow = getChildForDirection(targetParent, upBool);
targetRow && moveCursorToRow(ed, sourceNode, targetRow, upBool);
tinymce.dom.Event.cancel(event);
return true;
}
function escapeTable(upBool, currentRow, siblingDirection, table, event) {
var tableSibling = table[siblingDirection];
if (tableSibling) {
moveCursorToStartOfElement(tableSibling);
return true;
} else {
var parentCell = ed.dom.getParent(table, 'td,th');
if (parentCell) {
return handle(upBool, parentCell, event);
} else {
var backUpSibling = getChildForDirection(currentRow, !upBool);
moveCursorToStartOfElement(backUpSibling);
return tinymce.dom.Event.cancel(event);
}
}
}
function getChildForDirection(parent, up) {
return parent && parent[up ? 'lastChild' : 'firstChild'];
}
function moveCursorToStartOfElement(n) {
ed.selection.setCursorLocation(n, 0);
}
function isVerticalMovement() {
return key == VK.UP || key == VK.DOWN;
}
function isInTable(ed) {
var node = ed.selection.getNode();
var currentRow = ed.dom.getParent(node, 'tr');
return currentRow !== null;
}
function columnIndex(column) {
var colIndex = 0;
var c = column;
while (c.previousSibling) {
c = c.previousSibling;
colIndex = colIndex + getSpanVal(c, "colspan");
}
return colIndex;
}
function findColumn(rowElement, columnIndex) {
var c = 0;
var r = 0;
each(rowElement.children, function(cell, i) {
c = c + getSpanVal(cell, "colspan");
r = i;
if (c > columnIndex)
return false;
});
return r;
}
function moveCursorToRow(ed, node, row, upBool) {
var srcColumnIndex = columnIndex(ed.dom.getParent(node, 'td,th'));
var tgtColumnIndex = findColumn(row, srcColumnIndex);
var tgtNode = row.childNodes[tgtColumnIndex];
var rowCellTarget = getChildForDirection(tgtNode, upBool);
moveCursorToStartOfElement(rowCellTarget || tgtNode);
}
function shouldFixCaret(preBrowserNode) {
var newNode = ed.selection.getNode();
var newParent = ed.dom.getParent(newNode, 'td,th');
var oldParent = ed.dom.getParent(preBrowserNode, 'td,th');
					return newParent && newParent !== oldParent && checkSameParentTable(newParent, oldParent);
				}
				function checkSameParentTable(nodeOne, nodeTwo) {
					return ed.dom.getParent(nodeOne, 'TABLE') === ed.dom.getParent(nodeTwo, 'TABLE');
}
if (isVerticalMovement() && isInTable(ed)) {
var preBrowserNode = ed.selection.getNode();
setTimeout(function() {
if (shouldFixCaret(preBrowserNode)) {
handle(!e.shiftKey && key === VK.UP, preBrowserNode, e);
}
}, 0);
}
}
ed.onKeyDown.add(moveSelection);
}
// Fixes an issue on Gecko where it's impossible to place the caret behind a table
// This fix will force a paragraph element after the table but only when the forced_root_block setting is enabled
if (!tinymce.isIE) {
function fixTableCaretPos() {
var last;
				// Skip empty text nodes from the end
for (last = ed.getBody().lastChild; last && last.nodeType == 3 && !last.nodeValue.length; last = last.previousSibling) ;
if (last && last.nodeName == 'TABLE')
ed.dom.add(ed.getBody(), 'p', null, '<br mce_bogus="1" />');
};
			// Fixes a bug where it's impossible to place the caret before a table in Gecko.
			// This fix solves it by detecting when the caret is at the beginning of such a table
			// and then manually moving the caret in front of the table.
if (tinymce.isGecko) {
ed.onKeyDown.add(function(ed, e) {
var rng, table, dom = ed.dom;
// On gecko it's not possible to place the caret before a table
if (e.keyCode == 37 || e.keyCode == 38) {
rng = ed.selection.getRng();
table = dom.getParent(rng.startContainer, 'table');
if (table && ed.getBody().firstChild == table) {
if (isAtStart(rng, table)) {
rng = dom.createRng();
rng.setStartBefore(table);
rng.setEndBefore(table);
ed.selection.setRng(rng);
e.preventDefault();
}
}
}
});
}
ed.onKeyUp.add(fixTableCaretPos);
ed.onSetContent.add(fixTableCaretPos);
ed.onVisualAid.add(fixTableCaretPos);
ed.onPreProcess.add(function(ed, o) {
var last = o.node.lastChild;
if (last && last.childNodes.length == 1 && last.firstChild.nodeName == 'BR')
ed.dom.remove(last);
});
fixTableCaretPos();
ed.startContent = ed.getContent({format : 'raw'});
}
});
// Register action commands
each({
mceTableSplitCells : function(grid) {
grid.split();
},
mceTableMergeCells : function(grid) {
var rowSpan, colSpan, cell;
cell = ed.dom.getParent(ed.selection.getNode(), 'th,td');
if (cell) {
rowSpan = cell.rowSpan;
colSpan = cell.colSpan;
}
if (!ed.dom.select('td.mceSelected,th.mceSelected').length) {
winMan.open({
url : url + '/merge_cells.htm',
width : 240 + parseInt(ed.getLang('table.merge_cells_delta_width', 0)),
height : 110 + parseInt(ed.getLang('table.merge_cells_delta_height', 0)),
inline : 1
}, {
rows : rowSpan,
cols : colSpan,
onaction : function(data) {
grid.merge(cell, data.cols, data.rows);
},
plugin_url : url
});
} else
grid.merge();
},
mceTableInsertRowBefore : function(grid) {
grid.insertRow(true);
},
mceTableInsertRowAfter : function(grid) {
grid.insertRow();
},
mceTableInsertColBefore : function(grid) {
grid.insertCol(true);
},
mceTableInsertColAfter : function(grid) {
grid.insertCol();
},
mceTableDeleteCol : function(grid) {
grid.deleteCols();
},
mceTableDeleteRow : function(grid) {
grid.deleteRows();
},
mceTableCutRow : function(grid) {
clipboardRows = grid.cutRows();
},
mceTableCopyRow : function(grid) {
clipboardRows = grid.copyRows();
},
mceTablePasteRowBefore : function(grid) {
grid.pasteRows(clipboardRows, true);
},
mceTablePasteRowAfter : function(grid) {
grid.pasteRows(clipboardRows);
},
mceTableDelete : function(grid) {
grid.deleteTable();
}
}, function(func, name) {
ed.addCommand(name, function() {
var grid = createTableGrid();
if (grid) {
func(grid);
ed.execCommand('mceRepaint');
cleanup();
}
});
});
// Register dialog commands
each({
mceInsertTable : function(val) {
winMan.open({
url : url + '/table.htm',
width : 400 + parseInt(ed.getLang('table.table_delta_width', 0)),
height : 320 + parseInt(ed.getLang('table.table_delta_height', 0)),
inline : 1
}, {
plugin_url : url,
action : val ? val.action : 0
});
},
mceTableRowProps : function() {
winMan.open({
url : url + '/row.htm',
width : 400 + parseInt(ed.getLang('table.rowprops_delta_width', 0)),
height : 295 + parseInt(ed.getLang('table.rowprops_delta_height', 0)),
inline : 1
}, {
plugin_url : url
});
},
mceTableCellProps : function() {
winMan.open({
url : url + '/cell.htm',
width : 400 + parseInt(ed.getLang('table.cellprops_delta_width', 0)),
height : 295 + parseInt(ed.getLang('table.cellprops_delta_height', 0)),
inline : 1
}, {
plugin_url : url
});
}
}, function(func, name) {
ed.addCommand(name, function(ui, val) {
func(val);
});
});
}
});
// Register plugin
tinymce.PluginManager.add('table', tinymce.plugins.TablePlugin);
})(tinymce); | PypiClean |
/OTLModel/Classes/Onderdeel/Camera.py | from OTLMOW.OTLModel.BaseClasses.OTLAttribuut import OTLAttribuut
from OTLMOW.OTLModel.Classes.ImplementatieElement.AIMNaamObject import AIMNaamObject
from OTLMOW.OTLModel.Datatypes.BooleanField import BooleanField
from OTLMOW.OTLModel.Datatypes.DtcCameraBeeldverwerking import DtcCameraBeeldverwerking
from OTLMOW.OTLModel.Datatypes.DtcDocument import DtcDocument
from OTLMOW.OTLModel.Datatypes.DteIPv4Adres import DteIPv4Adres
from OTLMOW.OTLModel.Datatypes.KlCameraMerk import KlCameraMerk
from OTLMOW.OTLModel.Datatypes.KlCameraModelnaam import KlCameraModelnaam
from OTLMOW.OTLModel.Datatypes.KlServicePrioriteit import KlServicePrioriteit
from OTLMOW.OTLModel.Datatypes.KwantWrdInMeter import KwantWrdInMeter
from OTLMOW.OTLModel.Datatypes.StringField import StringField
from OTLMOW.GeometrieArtefact.PuntGeometrie import PuntGeometrie
# Generated with OTLClassCreator. To modify: extend, do not edit
class Camera(AIMNaamObject, PuntGeometrie):
"""Een CCTV-camera, closed-circuit television camera, kortweg camera, produceert beelden of opnames voor bewaking van een regio vanop afstand. Het is een element dat beelden neemt van een locatie en deze doorgeeft naar verschillende partijen om zo de werkelijke situatie te kunnen inschatten vanop afstand. Deze camera kan van het analoge type zijn of digitaal."""
typeURI = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera'
"""De URI van het object volgens https://www.w3.org/2001/XMLSchema#anyURI."""
def __init__(self):
AIMNaamObject.__init__(self)
PuntGeometrie.__init__(self)
self._beeldverwerkingsinstelling = OTLAttribuut(field=DtcCameraBeeldverwerking,
naam='beeldverwerkingsinstelling',
label='beeldverwerkingsinstelling',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.beeldverwerkingsinstelling',
usagenote='Wanneer de camera de beeldverwerking niet zelf doet maar enkel beelden verstuurt voor verwerking in een externe eenheid, moet die externe eenheid als aparte asset aangemaakt worden indien het specifieke type bestaat in de OTL of moet een instantie van Software gebruikt worden wanneer geen specifieke externe verwerkingseenheid voorzien is. Dit attribuut kan dus enkel gebruikt worden indien de camera of een verwerkingseenheid van de camera zelf de analyse doet en die analyse doorstuurt naar een asset die met de analyse werkt en niet met de beelden.',
kardinaliteit_max='*',
definition='Geeft aan welke types beeldverwerking die camera zelf uitvoert dus zonder gebruik te maken van een externe verwerkingseenheid.',
owner=self)
self._configBestandAid = OTLAttribuut(field=DtcDocument,
naam='configBestandAid',
label='configuratie bestand AID',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.configBestandAid',
usagenote='Attribuut uit gebruik sinds versie 2.3.0 ',
deprecated_version='2.3.0',
definition='Het bestand met de configuratie van de AID component die deel is van de camera.',
owner=self)
self._dnsNaam = OTLAttribuut(field=StringField,
naam='dnsNaam',
label='DNS naam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.dnsNaam',
definition='De DNSNaam (ook "volledige domein naam" genoemd ) is een unieke naam binnen het Domain Name System (DNS), het naamgevingssysteem waarmee computers, webservers, diensten en toepassing op een unieke manier kunnen worden geïdentificeerd. Deze bevat zowel de hostname en de top level domein naam bv. 120c8-ar1.belfa.be.',
owner=self)
self._heeftAid = OTLAttribuut(field=BooleanField,
naam='heeftAid',
label='heeft AID',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.heeftAid',
usagenote='Attribuut uit gebruik sinds versie 2.3.0 ',
deprecated_version='2.3.0',
definition='Een AID-camera is een CCTV-camera met geintegreerde AID-module. Deze camera genereert naast een camerabeeld ook metadata ivm wat zich afspeelt op het beeld. Een voorbeeld hiervan is gestopte voertuigen.',
owner=self)
self._heeftSpitsstrook = OTLAttribuut(field=BooleanField,
naam='heeftSpitsstrook',
label='heeft spitsstrook',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.heeftSpitsstrook',
usagenote='Attribuut uit gebruik sinds versie 2.3.0 ',
deprecated_version='2.3.0',
definition='Locatie-eigenschap van een camera. Dit attribuut geeft aan of de camera ingezet wordt om een spitsstrook te schouwen.',
owner=self)
self._ipAdres = OTLAttribuut(field=DteIPv4Adres,
naam='ipAdres',
label='ip adres',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.ipAdres',
definition='Het IP-adres van de camera.',
owner=self)
self._isPtz = OTLAttribuut(field=BooleanField,
naam='isPtz',
label='is PTZ',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.isPtz',
definition='Een PTZ-camera is een CCTV-camera met bijhorend de mogelijkheid om te pannen, tilten en zoomen. Dit vanop afstand met behulp van een verstelbare lens en een motor die in twee assen draaibeweging toelaat.',
owner=self)
self._merk = OTLAttribuut(field=KlCameraMerk,
naam='merk',
label='merk',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.merk',
definition='Het merk van de camera.',
owner=self)
self._modelnaam = OTLAttribuut(field=KlCameraModelnaam,
naam='modelnaam',
label='modelnaam',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.modelnaam',
definition='De modelnaam van de camera.',
owner=self)
self._opstelhoogte = OTLAttribuut(field=KwantWrdInMeter,
naam='opstelhoogte',
label='opstelhoogte',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.opstelhoogte',
usagenote='De plaats waar de draagconstructie in de grond bevestigd is, bepaalt van waar gemeten wordt voor het bepalen van de opstelhoogte. Wanneer een camera die een brugdek overziet, bevestigd is aan een paal die naast de brug staat, wordt de hoogte gemeten vanaf de basis van de paal en niet vanaf het brugdek. ',
definition='De hoogte waarop de camera bevestigd is, gemeten ten opzichte van het maaiveld waarin de draagconstructie voor de camera verankerd is.',
owner=self)
self._servicePrioriteit = OTLAttribuut(field=KlServicePrioriteit,
naam='servicePrioriteit',
label='Service Prioriteit',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.servicePrioriteit',
definition='Het prioriteitsniveau dat aangeeft hoe dringend iets moet onderhouden/gerepareerd worden',
owner=self)
self._technischeFiche = OTLAttribuut(field=DtcDocument,
naam='technischeFiche',
label='technische fiche',
objectUri='https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#Camera.technischeFiche',
usagenote='Bestanden van het type pdf.',
definition="Technische fiche van dit element met opsplitsing tussen CCTV, AID en PTZ-camera's.",
owner=self)
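    # Editorial note: each public attribute below is a read/write property pair
    # that delegates to its OTLAttribuut instance via get_waarde()/set_waarde(),
    # so validation stays in the imported field classes.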
@property
def beeldverwerkingsinstelling(self):
"""Geeft aan welke types beeldverwerking die camera zelf uitvoert dus zonder gebruik te maken van een externe verwerkingseenheid."""
return self._beeldverwerkingsinstelling.get_waarde()
@beeldverwerkingsinstelling.setter
def beeldverwerkingsinstelling(self, value):
self._beeldverwerkingsinstelling.set_waarde(value, owner=self)
@property
def configBestandAid(self):
"""Het bestand met de configuratie van de AID component die deel is van de camera."""
return self._configBestandAid.get_waarde()
@configBestandAid.setter
def configBestandAid(self, value):
self._configBestandAid.set_waarde(value, owner=self)
@property
def dnsNaam(self):
"""De DNSNaam (ook "volledige domein naam" genoemd ) is een unieke naam binnen het Domain Name System (DNS), het naamgevingssysteem waarmee computers, webservers, diensten en toepassing op een unieke manier kunnen worden geïdentificeerd. Deze bevat zowel de hostname en de top level domein naam bv. 120c8-ar1.belfa.be."""
return self._dnsNaam.get_waarde()
@dnsNaam.setter
def dnsNaam(self, value):
self._dnsNaam.set_waarde(value, owner=self)
@property
def heeftAid(self):
"""Een AID-camera is een CCTV-camera met geintegreerde AID-module. Deze camera genereert naast een camerabeeld ook metadata ivm wat zich afspeelt op het beeld. Een voorbeeld hiervan is gestopte voertuigen."""
return self._heeftAid.get_waarde()
@heeftAid.setter
def heeftAid(self, value):
self._heeftAid.set_waarde(value, owner=self)
@property
def heeftSpitsstrook(self):
"""Locatie-eigenschap van een camera. Dit attribuut geeft aan of de camera ingezet wordt om een spitsstrook te schouwen."""
return self._heeftSpitsstrook.get_waarde()
@heeftSpitsstrook.setter
def heeftSpitsstrook(self, value):
self._heeftSpitsstrook.set_waarde(value, owner=self)
@property
def ipAdres(self):
"""Het IP-adres van de camera."""
return self._ipAdres.get_waarde()
@ipAdres.setter
def ipAdres(self, value):
self._ipAdres.set_waarde(value, owner=self)
@property
def isPtz(self):
"""Een PTZ-camera is een CCTV-camera met bijhorend de mogelijkheid om te pannen, tilten en zoomen. Dit vanop afstand met behulp van een verstelbare lens en een motor die in twee assen draaibeweging toelaat."""
return self._isPtz.get_waarde()
@isPtz.setter
def isPtz(self, value):
self._isPtz.set_waarde(value, owner=self)
@property
def merk(self):
"""Het merk van de camera."""
return self._merk.get_waarde()
@merk.setter
def merk(self, value):
self._merk.set_waarde(value, owner=self)
@property
def modelnaam(self):
"""De modelnaam van de camera."""
return self._modelnaam.get_waarde()
@modelnaam.setter
def modelnaam(self, value):
self._modelnaam.set_waarde(value, owner=self)
@property
def opstelhoogte(self):
"""De hoogte waarop de camera bevestigd is, gemeten ten opzichte van het maaiveld waarin de draagconstructie voor de camera verankerd is."""
return self._opstelhoogte.get_waarde()
@opstelhoogte.setter
def opstelhoogte(self, value):
self._opstelhoogte.set_waarde(value, owner=self)
@property
def servicePrioriteit(self):
"""Het prioriteitsniveau dat aangeeft hoe dringend iets moet onderhouden/gerepareerd worden"""
return self._servicePrioriteit.get_waarde()
@servicePrioriteit.setter
def servicePrioriteit(self, value):
self._servicePrioriteit.set_waarde(value, owner=self)
@property
def technischeFiche(self):
"""Technische fiche van dit element met opsplitsing tussen CCTV, AID en PTZ-camera's."""
return self._technischeFiche.get_waarde()
@technischeFiche.setter
def technischeFiche(self, value):
self._technischeFiche.set_waarde(value, owner=self) | PypiClean |
/MarkDo-0.3.0.tar.gz/MarkDo-0.3.0/markdo/static/bower/codemirror/lib/util/formatting.js | (function() {
CodeMirror.extendMode("css", {
commentStart: "/*",
commentEnd: "*/",
newlineAfterToken: function(type, content) {
return /^[;{}]$/.test(content);
}
});
CodeMirror.extendMode("javascript", {
commentStart: "/*",
commentEnd: "*/",
// FIXME semicolons inside of for
newlineAfterToken: function(type, content, textAfter, state) {
if (this.jsonMode) {
return /^[\[,{]$/.test(content) || /^}/.test(textAfter);
} else {
if (content == ";" && state.lexical && state.lexical.type == ")") return false;
return /^[;{}]$/.test(content) && !/^;/.test(textAfter);
}
}
});
CodeMirror.extendMode("xml", {
commentStart: "<!--",
commentEnd: "-->",
newlineAfterToken: function(type, content, textAfter) {
return type == "tag" && />$/.test(content) || /^</.test(textAfter);
}
});
// Comment/uncomment the specified range
CodeMirror.defineExtension("commentRange", function (isComment, from, to) {
var cm = this, curMode = CodeMirror.innerMode(cm.getMode(), cm.getTokenAt(from).state).mode;
cm.operation(function() {
if (isComment) { // Comment range
cm.replaceRange(curMode.commentEnd, to);
cm.replaceRange(curMode.commentStart, from);
if (from.line == to.line && from.ch == to.ch) // An empty comment inserted - put cursor inside
cm.setCursor(from.line, from.ch + curMode.commentStart.length);
} else { // Uncomment range
var selText = cm.getRange(from, to);
var startIndex = selText.indexOf(curMode.commentStart);
var endIndex = selText.lastIndexOf(curMode.commentEnd);
if (startIndex > -1 && endIndex > -1 && endIndex > startIndex) {
// Take string till comment start
selText = selText.substr(0, startIndex)
// From comment start till comment end
+ selText.substring(startIndex + curMode.commentStart.length, endIndex)
// From comment end till string end
+ selText.substr(endIndex + curMode.commentEnd.length);
}
cm.replaceRange(selText, from, to);
}
});
});
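  // Example usage (editorial; positions are hypothetical):
  //   cm.commentRange(true, {line: 0, ch: 0}, {line: 0, ch: 10}); // comment
  //   cm.commentRange(false, from, to);                           // uncomment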
// Applies automatic mode-aware indentation to the specified range
CodeMirror.defineExtension("autoIndentRange", function (from, to) {
var cmInstance = this;
this.operation(function () {
for (var i = from.line; i <= to.line; i++) {
cmInstance.indentLine(i, "smart");
}
});
});
// Applies automatic formatting to the specified range
CodeMirror.defineExtension("autoFormatRange", function (from, to) {
var cm = this;
var outer = cm.getMode(), text = cm.getRange(from, to).split("\n");
var state = CodeMirror.copyState(outer, cm.getTokenAt(from).state);
var tabSize = cm.getOption("tabSize");
var out = "", lines = 0, atSol = from.ch == 0;
function newline() {
out += "\n";
atSol = true;
++lines;
}
for (var i = 0; i < text.length; ++i) {
var stream = new CodeMirror.StringStream(text[i], tabSize);
while (!stream.eol()) {
var inner = CodeMirror.innerMode(outer, state);
var style = outer.token(stream, state), cur = stream.current();
stream.start = stream.pos;
if (!atSol || /\S/.test(cur)) {
out += cur;
atSol = false;
}
if (!atSol && inner.mode.newlineAfterToken &&
inner.mode.newlineAfterToken(style, cur, stream.string.slice(stream.pos) || text[i+1] || "", inner.state))
newline();
}
if (!stream.pos && outer.blankLine) outer.blankLine(state);
if (!atSol) newline();
}
cm.operation(function () {
cm.replaceRange(out, from, to);
for (var cur = from.line + 1, end = from.line + lines; cur <= end; ++cur)
cm.indentLine(cur, "smart");
cm.setSelection(from, cm.getCursor(false));
});
});
})(); | PypiClean |
/Ibidas-0.1.26.tar.gz/Ibidas-0.1.26/ibidas/itypes/detector.py | import numpy
import array
from collections import defaultdict
import operator
import platform
from itertools import chain
import rtypes
from ..constants import *
from ..utils import sparse_arrays, module_types
from ..utils.missing import *
import re
_delay_import_(globals(),"dimensions")
_delay_import_(globals(),"dimpaths")
_delay_import_(globals(),"..utils","util")
_scanchildren = defaultdict(list)
def registerTypeScanner(newscancls):
_scanchildren[newscancls.parentcls].append(newscancls)
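# _scanchildren maps a scanner class to its registered child scanners, forming
# the specialization tree that Detector walks from AnyScanner down to the most
# specific scanner that still accepts all observed values.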
missing_cls = set([MissingType])
missing_val = set([Missing])
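# Minimal usage sketch (editorial; uses only the API defined below):
#   d = Detector()
#   d.processSeq([1, 2, Missing])
#   t = d.getType()   # narrowest rtypes type, with has_missing=True here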
class Detector(object):
def __init__(self, parent_scanner=None, dim_eq=None, allow_need_convert=False):
self._scanners = None
self.objectclss = set()
self.count_elem = 0
self.allow_need_convert = allow_need_convert
if not dim_eq:
dim_eq = DimEqualizer()
self.dim_eq = dim_eq
self.parent_scanner = parent_scanner
def setParentDimensions(self,dims):
assert self.count_elem == 0, "Cannot perform setParentDimensions after process(Seq) has been called"
self._scanners = [OuterContainerScanner(self,dims)]
def process(self, obj):
self.processSeq(sparse_arrays.FullSparse(util.darray([obj])))
def processSeq(self, seq):
if not isinstance(seq, (sparse_arrays.FullSparse)):
if isinstance(seq, (set, frozenset)):
seq = util.darray(list(seq))
elif not isinstance(seq, numpy.ndarray):
seq = util.darray(seq)
seq = sparse_arrays.FullSparse(seq)
objclasses = seq.classes
self.objectclss |= objclasses
elem_count = len(seq)
if not objclasses.issubset(missing_cls):
if not self._scanners:
test_seq = seq[:100]
seq = seq[100:]
self._scanners = list(self._findAcceptableDescendants(test_seq))
if len(seq) > 0:
for pos in xrange(len(self._scanners) - 1, -1, -1):
scanner = self._scanners[pos]
while not scanner.scan(seq):
del self._scanners[pos]
if not any([scanner.parentcls in s.ancestor_scanners for s in self._scanners]):
scanner = scanner.unregister(create_parent=True)
self._scanners.insert(pos,scanner)
continue
scanner.unregister()
break
self.count_elem += elem_count
def getType(self):
if not self._scanners:
return rtypes.TypeUnknown()
if not self.parent_scanner:
self.dim_eq.processDims()
restypes = [scanner.getType() for scanner in self._scanners]
restypes = [t for t in restypes if t is not None]
assert not len(restypes) == 0,'BUG: no valid type could be detected'
if len(restypes) > 1:
if not all([r.has_missing for r in restypes]):
raise RuntimeError, 'Could not decide between types: ' + str(restypes)
else:
                return rtypes.TypeAny(has_missing=True)
return restypes[0]
def _findAcceptableDescendants(self, seq, scanner=None):
schildren = [sc(self) for sc in _scanchildren[scanner.__class__] if (sc.need_convert == True and self.allow_need_convert == True) or sc.need_convert == False]
res = set()
for sc in schildren:
if sc.scan(seq):
res.update(self._findAcceptableDescendants(seq, sc))
else:
sc.unregister()
if not res:
return (scanner,)
if scanner:
scanner.unregister()
return res
def hasMissing(self):
return not missing_cls.isdisjoint(self.objectclss)
LENGTH_NOINIT = 0 #length type not yet initialized
LENGTH_FIXED = 1 #lengths are fixed (all the same or missing)
LENGTH_VAR = 2 #variable lengths
LENGTH_REPEAT = 3 #lengths repeated in a pattern dictated by parent dimensions
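# A DimRep starts out FIXED (one shared length), degrades to VAR once mixed
# lengths are seen, and is compacted to REPEAT when the VAR lengths turn out
# to cycle in step with the parent dimensions (see checkRepeats below).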
class DimRep(object):
def __init__(self, parent_scanner, index=0):
self.parent_scanner = parent_scanner
self.index = index
self.lengths = None #stores length data. FIXED: int, VAR: list of lengths, REPEAT: list of lengths
self.length_count = 0 #FIXED: stores number of elements, REPEAT: number of repeats
self.length_type = LENGTH_NOINIT
self.nparents = 0
self.has_missing = False
self.dim = None
self.dirty = True
def processLengths(self, lengths, has_missing=False):
self.has_missing = max(self.has_missing, has_missing)
self.dirty = True
if has_missing:
            lengths = lengths.replace_missing(-1, otype=int)
if self.length_type == LENGTH_FIXED or self.length_type == LENGTH_NOINIT:
lenset = set(lengths)
lenset.discard(-1)
if len(lenset) == 0:
self.length_count += len(lengths)
elif len(lenset) > 1:
self._fullLengths()
self.processLengths(lengths)
else:
length = lenset.pop()
if self.length_type == LENGTH_NOINIT:
self.lengths = length
self.length_type = LENGTH_FIXED
if self.lengths == length:
self.length_count += len(lengths)
else:
self._fullLengths()
self.processLengths(lengths)
else:
if self.length_type == LENGTH_VAR:
self.lengths.extend(lengths)
else:
self._fullLengths()
self.processLengths(lengths)
def _fullLengths(self):
if self.length_type == LENGTH_FIXED:
self.lengths = [self.lengths] * self.length_count
elif self.length_type == LENGTH_NOINIT:
self.lengths = []
elif self.length_type == LENGTH_REPEAT:
self.lengths = self.lengths * self.length_count
self.nparents = 0
self.length_type = LENGTH_VAR
def _setDim(self, dim):
self.dirty = False
self.dim = dim
def checkRepeats(self, repeat_length, nparents):
if self.length_type == LENGTH_FIXED or self.length_type == LENGTH_NOINIT:
return False
if self.length_type == LENGTH_REPEAT:
return self.lengths == repeat_length
lengths = self.lengths
if len(lengths) % repeat_length:
return False
repeat = lengths[:repeat_length]
lengths = lengths[repeat_length:]
while lengths:
if repeat != lengths[:repeat_length]:
return False
lengths = lengths[repeat_length:]
self.length_count = len(lengths) / repeat_length
self.lengths = repeat
self.nparents = nparents
self.length_type = LENGTH_REPEAT
return True
def getDim(self):
assert self.dirty == False,'DimRep is dirty, has dim equalizer been processed?'
return self.dim
def combineWith(self, other):
self.dirty = True
if self.length_type == LENGTH_FIXED and other.length_type == LENGTH_FIXED:
assert self.length_count == other.length_count,'Cannot combine dimreps with unequal lengths'
self.lengths = self.lengths * other.lengths
else:
self._fullLengths()
other._fullLengths()
            assert len(self.lengths) == len(other.lengths), 'Cannot combine dimreps with unequal lengths'
self.lengths = list(numpy.multiply(self.lengths, other.lengths))
def getParentDimReps(self):
return self.parent_scanner.getDimReps(self.index)
class FixedDimRep(DimRep):
def __init__(self, parent_scanner, index, dim):
self.parent_scanner = parent_scanner
self.index = index
if(dim.isVariable()):
self.length_type = LENGTH_VAR
self.lengths = []
else:
self.length_type = LENGTH_FIXED
if isinstance(dim.shape,int):
self.lengths = dim.shape
else:
self.lengths = UNDEFINED
#FIXME: give lengths of variable dimensions from data, will also enable repeat var dimensions
self.nparents = 0
self.has_missing = dim.has_missing
self.dim = dim
self.dirty = False
class DimEqualizer(object):
def __init__(self):
self.dimreps = []
def registerDimRep(self, dimrep):
self.dimreps.append(dimrep)
def unregisterDimRep(self, dimrep):
del self.dimreps[self.dimreps.index(dimrep)]
def _processParents(self, dimrep):
cur_parents = dimrep.getParentDimReps()
for parent in cur_parents:
if parent.dirty:
self._attachDim(parent)
pos = len(cur_parents)
repeat_length = 1
while pos:
pos -= 1
parent = cur_parents[pos]
            assert (parent.length_type != LENGTH_NOINIT), 'Uninitialized parent should not be possible'
if parent.length_type == LENGTH_VAR: #A VAR as parent means that no repeat could be found, i.e. lengths vary across all parents
break
elif parent.length_type == LENGTH_REPEAT:
repeat_length *= sum(parent.lengths)
pos -= parent.nparents
elif parent.lengths == UNDEFINED: # length is UNDEFINED, no further checks possible
break
else:
repeat_length *= parent.lengths
if dimrep.checkRepeats(repeat_length, len(cur_parents) - pos):
cur_parents = cur_parents[-dimrep.nparents:]
break
return cur_parents
def _attachDim(self, dimrep):
        assert (dimrep.length_type != LENGTH_NOINIT), 'Uninitialized parent should not be possible'
if dimrep.length_type != LENGTH_FIXED:
cur_parents = self._processParents(dimrep)
parents = cur_parents
else:
parents = []
for dr in self.dimreps:
if dr.dirty is True:
continue
if dr.has_missing != dimrep.has_missing:
continue
if dimrep.length_type != dr.length_type:
continue
if dr.lengths != dimrep.lengths:
continue
if dimrep.length_type == LENGTH_FIXED:
dimrep._setDim(dr.dim)
return dr.dim
match_parents = dr.getParentDimReps()[-len(parents):]
if len(match_parents) != len(cur_parents):
continue
for mp, cp in zip(match_parents, cur_parents):
if mp.dirty:
self._attachDim(mp)
if cp.dim is not mp.dim:
break
else:
dimrep._setDim(dr.dim)
return dr.dim
if dimrep.length_type == LENGTH_FIXED:
            ndim = dimensions.Dim(dimrep.lengths, tuple(), dimrep.has_missing, name="d" + str(self.dimid()))
        else:
            ndim = dimensions.Dim(UNDEFINED, (True,) * len(parents), dimrep.has_missing, name="d" + str(self.dimid()))
dimrep._setDim(ndim)
return ndim
def processDims(self):
self.dimid = util.seqgen().next
for dr in self.dimreps:
if dr.dirty:
self._attachDim(dr)
class TypeScanner(object):
need_convert=False
typecls = rtypes.TypeAny
good_cls = set()
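    # Scanner protocol: scan(seq) returns True while this scanner still accepts
    # the data; on False the Detector discards it and falls back towards
    # parentcls. getType() is called once scanning is finished.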
def __init__(self, detector):
self.detector = detector
self.dimreps = []
def unregister(self, create_parent=False):
for d in self.dimreps:
self.detector.dim_eq.unregisterDimRep(d)
if create_parent:
return self.parentcls(self.detector)
def scan(self, seq):
return self.detector.objectclss.issubset(self.good_cls)
def getType(self):
return self.typecls(self.detector.hasMissing())
def getAncestorScanners(self):
if not hasattr(self.__class__, 'ancest_cls_cache'):
acc = []
cur_cls = self.__class__.parentcls
while cur_cls is not None.__class__:
acc.append(cur_cls)
cur_cls = cur_cls.parentcls
self.__class__.ancest_cls_cache = acc
return self.__class__.ancest_cls_cache
ancestor_scanners = property(fget=getAncestorScanners)
def getSubDetector(self, id=0):
id = 'subdetector_' + str(id)
if id not in self.__dict__:
d = Detector(self, self.detector.dim_eq, self.detector.allow_need_convert)
setattr(self, id, d)
if self.detector.count_elem:
d.processSeq([Missing] * self.detector.count_elem)
return getattr(self, id)
def getDimReps(self, last_index=None):
ps = self.detector.parent_scanner
if last_index is None:
sdimreps = self.dimreps[:]
else:
sdimreps = self.dimreps[:last_index]
if ps:
if not self.dimreps:
return ps.getDimReps()
res = ps.getDimReps()
res.extend(sdimreps)
return res
return sdimreps
def getDimRep(self, i):
if len(self.dimreps) <= i:
assert len(self.dimreps) == i,'DimReps not requested in order'
d = DimRep(self, i)
if self.detector.count_elem:
d.processLengths(sparse_arrays.FullSparse([Missing] * self.detector.count_elem), has_missing=True)
self.detector.dim_eq.registerDimRep(d)
self.dimreps.append(d)
return self.dimreps[i]
def reduceDimReps(self, i):
while len(self.dimreps) > i:
d = self.dimreps.pop()
self.dimreps[-1].combineWith(d)
self.detector.dim_eq.unregisterDimRep(d)
self.min_dim = i
class AnyScanner(TypeScanner):
parentcls = None.__class__
    def unregister(self, create_parent=False):
        if create_parent:
            raise RuntimeError, 'Attempting to find parent for AnyScanner'
        return TypeScanner.unregister(self, create_parent)
def scan(self, seq):
return True
registerTypeScanner(AnyScanner)
TypeScanner.parentcls = AnyScanner
class TupleScanner(TypeScanner):
good_cls = set([tuple, MissingType])
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.max_len = 0
self.min_len = 999999999
self.fieldnames = []
def getType(self):
if(not len(self.fieldnames) == self.max_len):
self.fieldnames = ['f' + str(i) for i in xrange(self.max_len)]
subtypes = tuple([self.getSubDetector(i).getType() for i in xrange(self.max_len)])
return rtypes.TypeTuple(self.detector.hasMissing(), subtypes, self.fieldnames)
def scan(self, seq):
if not self.detector.objectclss.issubset(self.good_cls):
return False
l = seq.map(len, out_empty=0, has_missing=self.detector.hasMissing(), otype=int)
maxlen = l.max(out_empty=0)
minlen = l.min(out_empty=self.min_len)
self.max_len = max(maxlen, self.max_len)
self.min_len = min(minlen, self.min_len)
if(len(seq) == 1 and self.max_len == self.min_len):
self.fieldnames = util.find_names(seq[0])
for i in xrange(self.max_len):
d = self.getSubDetector(i)
f = operator.itemgetter(i)
if i < self.min_len:
subseq = seq.map(f, has_missing=self.detector.hasMissing())
else:
subseq = seq.filter_tomissing(l > i).map(f, out_empty=Missing, otype=object, has_missing=True)
d.processSeq(subseq)
return True
registerTypeScanner(TupleScanner)
class NamedTupleScanner(TypeScanner):
bad_cls = set([tuple])
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.tuple_cls = None
def getType(self):
fieldnames = [util.valid_name(name) for name in self.tuple_cls._fields]
subtypes = tuple([self.getSubDetector(pos).getType() for pos in range(len(fieldnames))])
return rtypes.TypeTuple(self.detector.hasMissing(), subtypes, fieldnames)
def scan(self, seq):
if self.bad_cls.issubset(self.detector.objectclss):
return False
for cls in self.detector.objectclss:
if cls is MissingType:
continue
if not self.tuple_cls:
if not hasattr(cls, '_fields'):
return False
self.tuple_cls = cls
elif not self.tuple_cls is cls:
return False
fieldlen = len(self.tuple_cls._fields)
for i in xrange(fieldlen):
d = self.getSubDetector(i)
f = operator.itemgetter(i)
subseq = seq.map(f, has_missing=self.detector.hasMissing())
d.processSeq(subseq)
return True
registerTypeScanner(NamedTupleScanner)
class RecordDictScanner(TypeScanner):
good_cls = set([dict, module_types.soap_struct, MissingType])
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.names = set()
def getType(self):
fieldnames = [name for name in self.names]
subtypes = tuple([self.getSubDetector(name).getType() for name in self.names])
return rtypes.TypeRecordDict(self.detector.hasMissing(), subtypes, fieldnames)
def scan(self, seq):
if not self.detector.objectclss.issubset(self.good_cls):
return False
names = self.names.copy()
if dict in self.detector.objectclss:
assert not module_types.soap_struct in self.detector.objectclss, "dict cannot be mixed with SOAPpy struct type"
for elem in seq:
if not (elem is Missing):
names.update(elem.keys())
else:
assert not dict in self.detector.objectclss, "dict cannot be mixed with SOAPpy struct type"
for elem in seq:
if not (elem is Missing):
names.update(elem._keys())
newnames = names - self.names
for name in newnames:
if not isinstance(name, basestring) or util.valid_name(name) != name:
return False
self.names = names
if(len(self.names) > 100):
return False
for name in self.names:
d = self.getSubDetector(name)
def getname(elem):
try:
return elem[name]
except KeyError:
return Missing
subseq = seq.map(getname, otype=object)
d.processSeq(subseq)
return True
registerTypeScanner(RecordDictScanner)
class ContainerScanner(TypeScanner):
good_cls = set([set, frozenset, MissingType, list, array.array, numpy.ndarray])
bad_cls = set([tuple, str, unicode, numpy.unicode_, numpy.string_, module_types.soap_struct])
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.min_dim = None
def getType(self):
subtype = self.getSubDetector().getType()
dims = dimpaths.DimPath(*[self.getDimRep(i).dim for i in xrange(self.min_dim)])
if self.detector.hasMissing():
dims[0].has_missing = True
return dimpaths.dimsToArrays(dims, subtype)
def scan(self, seq):
has_missing = self.detector.hasMissing()
if not self.detector.objectclss.issubset(self.good_cls):
if not self.detector.objectclss.isdisjoint(self.bad_cls):
return False
for cls in self.detector.objectclss:
if issubclass(cls, tuple):
return False
l = seq.map(operator.isSequenceType, otype=bool, out_empty=True, has_missing=has_missing)
if not l.all(has_missing=False):
return False
if(self.min_dim == 1):
dr = self.getDimRep(0)
nelems = seq.map(getnelem, otype=object, out_empty=Missing, has_missing=has_missing)
dr.processLengths(nelems, has_missing=has_missing)
else:
shapes = seq.map(getshape, otype=object, out_empty=Missing, has_missing=has_missing)
shapelens = shapes.map(len, otype=object, out_empty=Missing, has_missing=has_missing)
min_dim = shapelens.min(has_missing=has_missing)
max_dim = shapelens.max(has_missing=has_missing)
if(min_dim is Missing or max_dim is Missing):
if(self.min_dim):
for i in xrange(self.min_dim):
dr = self.getDimRep(i)
dr.processLengths(shapelens, has_missing=has_missing)
else:
if self.min_dim is None:
self.min_dim = min_dim
else:
if min_dim < self.min_dim:
self.reduceDimReps(min_dim)
for i in xrange(self.min_dim):
dr = self.getDimRep(i)
                    if i == self.min_dim - 1 and self.min_dim < max_dim:
red = numpy.multiply.reduce
def reduceshape(shape):
return red(shape[i:])
f = reduceshape
else:
f = operator.itemgetter(i)
nelems = shapes.map(f, otype=object, out_empty=Missing, has_missing=has_missing)
dr.processLengths(nelems.ravel(), has_missing=has_missing)
d = self.getSubDetector()
if(self.min_dim == 1 and not has_missing):
d.processSeq(list(chain(*seq.ravel())))
else:
for subseq in seq.ravel():
if not (subseq is Missing):
d.processSeq(subseq)
return True
def getshape(elem):
try:
return elem.shape
except:
return (len(elem),)
def getnelem(elem):
try:
return len(elem.ravel())
except:
return len(elem)
registerTypeScanner(ContainerScanner)
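# OuterContainerScanner seeds the dimension bookkeeping with already-typed
# parent dimensions; Detector.setParentDimensions() uses it so the outer dims
# of the data are pinned instead of being re-detected.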
class OuterContainerScanner(ContainerScanner):
parentcls = None.__class__
def __init__(self, detector, dims):
ContainerScanner.__init__(self,detector)
self.dimreps = [FixedDimRep(self,pos,dim) for pos,dim in enumerate(dims)]
def scan(self,seq):
d = self.getSubDetector()
d.processSeq(seq)
return True
def getType(self):
return self.getSubDetector().getType()
class SetScanner(ContainerScanner):
parentcls = ContainerScanner
good_cls = set([set, frozenset, MissingType])
def getType(self):
subtype = self.getSubDetector().getType()
dim = dimensions.Dim(UNDEFINED, (True,) * len(self.getDimReps(0)), self.detector.hasMissing())
dims = dimpaths.DimPath(dim)
return rtypes.TypeSet(self.detector.hasMissing(), dims, (subtype,))
def unregister(self, create_parent=False):
parent = ContainerScanner.unregister(self, create_parent)
if create_parent:
parent.min_dim = 1
parent.dimreps = self.dimreps
return parent
def scan(self, seq):
has_missing = self.detector.hasMissing()
if not self.detector.objectclss.issubset(self.good_cls):
return False
d = self.getSubDetector()
for subseq in seq.ravel():
if not (subseq is Missing):
d.processSeq(subseq)
return True
registerTypeScanner(SetScanner)
class StringScanner(TypeScanner):
good_cls = set([str, unicode, MissingType, numpy.string_, numpy.unicode_])
unicode_cls = set([unicode, numpy.unicode_])
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.max_nchars = 0
def getType(self):
if self.unicode_cls.isdisjoint(self.detector.objectclss):
ntype = rtypes.TypeBytes
else:
ntype = rtypes.TypeString
if self.max_nchars < 32:
d = dimensions.Dim(self.max_nchars, tuple(), self.detector.hasMissing())
else:
d = dimensions.Dim(UNDEFINED, (True,) * len(self.getDimReps(0)), self.detector.hasMissing())
dims = dimpaths.DimPath(d)
return ntype(self.detector.hasMissing(), dims)
def scan(self, seq):
has_missing = self.detector.hasMissing()
if not self.detector.objectclss.issubset(self.good_cls):
return False
if(self.max_nchars < 32):
max_nchars = seq.map(len, otype=int, out_empty=-1, has_missing=has_missing).max(has_missing=False)
self.max_nchars = max(self.max_nchars, max_nchars)
return True
registerTypeScanner(StringScanner)
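# StringRealScanner and StringIntScanner below set need_convert=True: they
# claim string data that parses as numbers, and the Detector only considers
# such converting scanners when constructed with allow_need_convert=True.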
class StringRealScanner(StringScanner):
need_convert=True
parentcls=StringScanner
missing_str = set(["", "NA", "N/A","NaN", "nan", "--", "?", "null"])
def __init__(self, detector):
StringScanner.__init__(self, detector)
self.has_missing = False
def unregister(self, create_parent=False):
res = super(StringRealScanner,self).unregister(create_parent)
if create_parent:
res.max_nchars = self.max_nchars
return res
def getType(self):
return rtypes.TypeReal64(self.detector.hasMissing() or self.has_missing)
def scan(self, seq):
res = StringScanner.scan(self, seq)
if res:
for elem in seq.ravel():
try:
float(elem)
except (ValueError, TypeError):
if elem.__class__ in missing_cls or elem in self.missing_str:
self.has_missing = True
else:
return False
return res
registerTypeScanner(StringRealScanner)
class StringIntScanner(StringRealScanner):
parentcls=StringRealScanner
def unregister(self, create_parent=False):
res = super(StringIntScanner,self).unregister(create_parent)
if create_parent:
res.has_missing = self.has_missing
return res
def getType(self):
return rtypes.TypeInt64(self.detector.hasMissing() or self.has_missing)
def scan(self, seq):
res = StringScanner.scan(self, seq)
if res:
for elem in seq.ravel():
try:
int(elem)
except (ValueError, TypeError):
if elem.__class__ in missing_cls or elem in self.missing_str:
self.has_missing = True
else:
return False
return res
registerTypeScanner(StringIntScanner)
class StringProteinScanner(StringScanner):
need_convert=False
parentcls=StringScanner
regmatch = re.compile('^[AaBbCcDdEeFfGgHhIiKkLlMmNnOoPpQqRrSsTtUuVvWwYyZzXx\*\-]+$')
ntype = rtypes.TypeProteinSequence
def __init__(self, detector):
        StringScanner.__init__(self, detector)
        self.has_missing = False
self.evidence = 0
self.nelem = 0
def unregister(self, create_parent=False):
res = super(StringProteinScanner,self).unregister(create_parent)
if create_parent:
res.max_nchars = self.max_nchars
return res
def getType(self):
if not self.nelem or self.evidence / self.nelem <= 25:
if self.nelem:
                util.info('Detecting possible protein sequences as string type due to short average length (%d).\n' % (self.evidence / self.nelem))
return StringScanner.getType(self)
ntype = self.ntype
d = dimensions.Dim(UNDEFINED, (True,) * len(self.getDimReps(0)), self.detector.hasMissing())
dims = dimpaths.DimPath(d)
return ntype(self.detector.hasMissing() or self.has_missing, dims)
def scan(self, seq):
res = StringScanner.scan(self, seq)
rm = self.regmatch
if res:
            minsize = numpy.inf
            for elem in seq.ravel():
                if not elem:
                    self.has_missing = True
                    continue
                if rm.match(elem) is None:
                    return False
                size = len(elem)
                if size:
                    self.evidence += size
                    self.nelem += 1
                    minsize = min(minsize, size)
            if minsize < 25:
                return False
return res
registerTypeScanner(StringProteinScanner)
class StringDNAScanner(StringProteinScanner):
need_convert=False
parentcls=StringProteinScanner
regmatch = re.compile('^[acgtnACGTNurykmswbdhvnx\-URYKMSWBDHVNX]+$')
ntype = rtypes.TypeDNASequence
def unregister(self, create_parent=False):
res = super(StringDNAScanner,self).unregister(create_parent)
if create_parent:
res.has_missing = self.has_missing
res.evidence = self.evidence
res.nelem = self.nelem
return res
def getType(self):
if not self.nelem:
return StringScanner.getType(self)
ntype = self.ntype
d = dimensions.Dim(UNDEFINED, (True,) * len(self.getDimReps(0)), self.detector.hasMissing())
dims = dimpaths.DimPath(d)
return ntype(self.detector.hasMissing() or self.has_missing, dims)
def scan(self, seq):
res = StringScanner.scan(self, seq)
rm = self.regmatch
if res:
for elem in seq.ravel():
if not elem:
                    self.has_missing = True
continue
if rm.match(elem) is None:
return False
size = len(elem)
if size:
self.evidence += size
self.nelem += 1
return res
registerTypeScanner(StringDNAScanner)
class SliceScanner(TypeScanner):
__doc__ = 'Slice scanner'
typecls = rtypes.TypeSlice
good_cls = set((slice, MissingType))
def __init__(self, detector):
TypeScanner.__init__(self, detector)
def getType(self):
has_missing = self.detector.hasMissing()
return self.typecls(has_missing)
def scan(self, seq):
if not self.detector.objectclss.issubset(self.good_cls):
return False
return True
registerTypeScanner(SliceScanner)
class NumberScanner(TypeScanner):
__doc__ = 'Number scanner, accepts all number objects.'
typecls = rtypes.TypeNumber
good_cls = set((bool, float, complex, long, int, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64, numpy.float32, numpy.float64, numpy.complex64, numpy.complex128, numpy.bool_, MissingType))
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.has_nan = False
def getType(self):
has_missing = self.detector.hasMissing() or self.has_nan
return self.typecls(has_missing)
def scan(self, seq):
if not self.detector.objectclss.issubset(self.good_cls):
return False
if not self.has_nan:
self.has_nan = numpy.nan in seq
return True
registerTypeScanner(NumberScanner)
class RealScanner(NumberScanner):
__doc__ = 'Real scanner, accepts all real and integer objects.'
parentcls = NumberScanner
typecls = rtypes.TypeReal64
good_cls = set((float, bool, numpy.float32, numpy.float64, MissingType, long, int, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64, numpy.bool_))
registerTypeScanner(RealScanner)
class IntegerScanner(TypeScanner):
parentcls = RealScanner
ustepvals = [256, 65536, 4294967296L, 18446744073709551616L]
uinttypes = [rtypes.TypeUInt8, rtypes.TypeUInt16, rtypes.TypeUInt32, rtypes.TypeUInt64]
istepvals = [128, 32768, 2147483648L, 9223372036854775808L]
inttypes = [rtypes.TypeInt8, rtypes.TypeInt16, rtypes.TypeInt32, rtypes.TypeInt64]
good_cls = set((bool, long, int, numpy.int8, numpy.int16, numpy.int32, numpy.int64, numpy.uint8, numpy.uint16, numpy.uint32, numpy.uint64, numpy.bool_, MissingType))
numpy_minmax = {numpy.dtype('bool'): (0, 1), numpy.dtype('int8'): (-128, 127), numpy.dtype('uint8'): (0, 255), numpy.dtype('int16'): (-32768, 32767), numpy.dtype('uint16'): (0, 65535), numpy.dtype('int32'): (-2147483648, 2147483647), numpy.dtype('uint32'): (0, 4294967295L), numpy.dtype('int64'): (-9223372036854775808L, 9223372036854775807L), numpy.dtype('uint64'): (0, 18446744073709551615L)}
array_minmax = {'b': (-128, 127), 'B': (0, 255), 'h': (-32768, 32767), 'H': (0, 65535), 'i': (-2147483648, 2147483647), 'I': (0, 4294967295L), 'l': (-2147483648, 2147483647), 'L': (0, 4294967295L)}
def __init__(self, detector):
TypeScanner.__init__(self, detector)
self.max_val = 0
self.min_val = 0
def getType(self):
out_type = rtypes.TypeInteger
if self.min_val >= 0:
for stepval, rtype in zip(self.ustepvals, self.uinttypes):
if self.max_val < stepval:
out_type = rtype
break
else:
for stepval, rtype in zip(self.istepvals, self.inttypes):
if -self.min_val <= stepval and self.max_val < stepval:
out_type = rtype
break
return out_type(self.detector.hasMissing())
def scan(self, seq):
if not self.detector.objectclss.issubset(self.good_cls):
return False
if isinstance(seq, numpy.ndarray) and not seq.dtype == object:
minmax = self.numpy_minmax[seq.dtype]
elif isinstance(seq, array.array):
minmax = self.array_minmax[seq.typecode]
elif len(seq) == 0:
minmax = (0, 0)
        else:
            minmax = (0, 0)
            for cls in self.detector.objectclss:
                if cls is MissingType:
                    continue
                # widen the bounds using the limits of each observed integer class
                tminmax = self.numpy_minmax[numpy.dtype(cls)]
                minmax = (min(tminmax[0], minmax[0]), max(tminmax[1], minmax[1]))
self.min_val = min(minmax[0], self.min_val)
self.max_val = max(minmax[1], self.max_val)
return True
registerTypeScanner(IntegerScanner)
class BoolScanner(TypeScanner):
parentcls = IntegerScanner
typecls = rtypes.TypeBool
good_cls = set((numpy.bool_, bool, MissingType))
registerTypeScanner(BoolScanner) | PypiClean |
/node_managment_application-0.0.1.tar.gz/node_managment_application-0.0.1/nms_app/static/admin/js/nav_sidebar.js | 'use strict';
{
const toggleNavSidebar = document.getElementById('toggle-nav-sidebar');
if (toggleNavSidebar !== null) {
const navSidebar = document.getElementById('nav-sidebar');
const main = document.getElementById('main');
let navSidebarIsOpen = localStorage.getItem('django.admin.navSidebarIsOpen');
if (navSidebarIsOpen === null) {
navSidebarIsOpen = 'true';
}
main.classList.toggle('shifted', navSidebarIsOpen === 'true');
navSidebar.setAttribute('aria-expanded', navSidebarIsOpen);
toggleNavSidebar.addEventListener('click', function() {
if (navSidebarIsOpen === 'true') {
navSidebarIsOpen = 'false';
} else {
navSidebarIsOpen = 'true';
}
localStorage.setItem('django.admin.navSidebarIsOpen', navSidebarIsOpen);
main.classList.toggle('shifted');
navSidebar.setAttribute('aria-expanded', navSidebarIsOpen);
});
}
function initSidebarQuickFilter() {
const options = [];
const navSidebar = document.getElementById('nav-sidebar');
if (!navSidebar) {
return;
}
navSidebar.querySelectorAll('th[scope=row] a').forEach((container) => {
options.push({title: container.innerHTML, node: container});
});
function checkValue(event) {
let filterValue = event.target.value;
if (filterValue) {
filterValue = filterValue.toLowerCase();
}
if (event.key === 'Escape') {
filterValue = '';
event.target.value = ''; // clear input
}
let matches = false;
for (const o of options) {
let displayValue = '';
if (filterValue) {
if (o.title.toLowerCase().indexOf(filterValue) === -1) {
displayValue = 'none';
} else {
matches = true;
}
}
// show/hide parent <TR>
o.node.parentNode.parentNode.style.display = displayValue;
}
if (!filterValue || matches) {
event.target.classList.remove('no-results');
} else {
event.target.classList.add('no-results');
}
sessionStorage.setItem('django.admin.navSidebarFilterValue', filterValue);
}
const nav = document.getElementById('nav-filter');
nav.addEventListener('change', checkValue, false);
nav.addEventListener('input', checkValue, false);
nav.addEventListener('keyup', checkValue, false);
const storedValue = sessionStorage.getItem('django.admin.navSidebarFilterValue');
if (storedValue) {
nav.value = storedValue;
checkValue({target: nav, key: ''});
}
}
window.initSidebarQuickFilter = initSidebarQuickFilter;
initSidebarQuickFilter();
} | PypiClean |
/GoogleLocationUtility-0.1.0a1.tar.gz/GoogleLocationUtility-0.1.0a1/docs/Location Processing.md | #### Function Pages
[Location Reporting](/docs/Location%20Reporting.md)
• [Location Filtering](/docs/Location%20Filtering.md)
• [geoTag and geoStrip](/docs/geoTag.md)
• [Location Mapping](/docs/Mapping.md)
[Getting Started](/docs#getting-started)
# Location Processing
[location_parse.py](/src/GLU/location_parse.py)
**Contents**:
[Invocation](#invocation) • [Requirements](#requirements) • [Operations](#operations) •
[Outputs](#outputs) • [CLI Example](#example) • [Data Descriptions](#location-history-data-descriptions)
## Invocation
`home --loc_parse`
`home -p`
## Requirements
1. *Records.json* in **LocationData** directory
## Operations
1. *Records.json* --> dictionary --> DataFrame
- May take some time and will consume RAM
2. **Cleaning**
- Remove all but GPS coordinates, accuracy, source, deviceTag, timestamp. See [location history data information](/docs/Location%20Processing.md#location-history-data-descriptions).
    - Other information may be kept in future versions; in my case, these fields were mostly empty.
- Remove records with negative accuracy values.
    - Ensure **source** values are all uppercase so that "WIFI" and "wifi" are treated the same.
3. **Calculations, Conversions**
- Convert **source** and **deviceTag** data type to *category*.
- Convert **timestampMs** to pandas *datetime*.
    - Calculate **timeStep** for all but the first entry: the pandas *timedelta* between a given record and the preceding record.
4. DataFrame --> parquet
    - Save processed data in the **LocationData** directory for future use.
    - Optimized data types are persisted.
    - Parquet provides significant time savings when loading/saving data versus CSV (see the sketch below).
5. **Exit Options**
- Create report or filter processed data while it is still in memory?
    - y/n prompt to generate a report; pressing anything other than "y" is interpreted as "n". A slight lag after input may occur.
    - y/n prompt to filter data; pressing anything other than "y" is interpreted as "n".
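
A minimal sketch of steps 1-4, assuming a Takeout-style *Records.json* with a top-level `locations` list (an illustration, not the package's actual implementation):

```python
import json
import pandas as pd

# 1. Records.json --> dictionary --> DataFrame
with open("LocationData/Records.json") as f:
    locations = json.load(f)["locations"]
df = pd.DataFrame(locations)

# 2. Cleaning: keep coordinates, accuracy, source, deviceTag, timestamp;
#    drop negative-accuracy records; normalize source case
df = df[["latitudeE7", "longitudeE7", "accuracy", "source", "deviceTag", "timestampMs"]]
df = df[df["accuracy"] >= 0]
df["source"] = df["source"].str.upper()

# 3. Calculations, conversions
df["source"] = df["source"].astype("category")
df["deviceTag"] = df["deviceTag"].astype("category")
df["timestampMs"] = pd.to_datetime(df["timestampMs"].astype("int64"), unit="ms")
df["timeStep"] = df["timestampMs"].diff()  # timedelta; NaT for the first entry

# 4. DataFrame --> parquet (optimized dtypes are persisted)
df.to_parquet("LocationData/parsed_example.parquet")
```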
## Outputs
1. *parsed_\<date\>.parquet* in **LocationData** directory
## Example
`home -p`
<details>
<summary>loc_parse CLI example</summary>

*Parsing operation for ~500MB Records.json file*
</details>
## Location History Data Descriptions
The following information is copied from *archive_browser.html*, included with Location History [Takeout](https://takeout.google.com/). **Bolded parameters** are persisted during processing.
> JSON
> The JSON Location History file describes device location signals and associated metadata collected while you were opted into Location History which you have not subsequently deleted.
> * locations: All location records.
> * **timestampMs(int64): Timestamp (UTC) in milliseconds for the recorded location.**
> * **latitudeE7(int32): The latitude value of the location in E7 format (degrees multiplied by 10^7 and rounded to the nearest integer).**
> * **longitudeE7(int32): The longitude value of the location in E7 format (degrees multiplied by 10^7 and rounded to the nearest integer).**
> * **accuracy(int32): Approximate location accuracy radius in meters.**
> * velocity(int32): Speed in meters per second.
> * heading(int32): Degrees east of true north.
> * altitude(int32): Meters above the WGS84 reference ellipsoid.
> * verticalAccuracy(int32): Vertical accuracy calculated in meters.
> * activity: Information about the activity at the location.
> * timestampMs(int64): Timestamp (UTC) in milliseconds for the recorded activity.
> * type: Description of the activity type.
> * confidence(int32): Confidence associated with the specified activity type.
> * **source(string): The source this location was derived from. Usually GPS, CELL or WIFI.**
> * **deviceTag(int32): An integer identifier (specific to Location History) associated with the device which uploaded the location.**
> * platform(string): The platform describing the device along with miscellaneous build information.
> * platformType(string): The platform type of the device. Either ANDROID, IOS or UNKNOWN.
> * locationMetadata: A repeated list of wifi scans consisting of access points. Each access point consists of the signal strength in dBm (decibels per milliwatt) and the MAC address of the access point.
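
As a concrete illustration of the E7 format (the values below are hypothetical):

```python
latitudeE7, longitudeE7 = 407127760, -740059730  # hypothetical stored integers
lat = latitudeE7 / 1e7   # 40.712776 degrees
lon = longitudeE7 / 1e7  # -74.005973 degrees
```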
| PypiClean |
/Newcalls-0.0.1-cp37-cp37m-win_amd64.whl/newcalls/node_modules/semver/classes/semver.js | const debug = require('../internal/debug')
const { MAX_LENGTH, MAX_SAFE_INTEGER } = require('../internal/constants')
const { safeRe: re, t } = require('../internal/re')
const parseOptions = require('../internal/parse-options')
const { compareIdentifiers } = require('../internal/identifiers')
class SemVer {
constructor (version, options) {
options = parseOptions(options)
if (version instanceof SemVer) {
if (version.loose === !!options.loose &&
version.includePrerelease === !!options.includePrerelease) {
return version
} else {
version = version.version
}
} else if (typeof version !== 'string') {
throw new TypeError(`Invalid version. Must be a string. Got type "${typeof version}".`)
}
if (version.length > MAX_LENGTH) {
throw new TypeError(
`version is longer than ${MAX_LENGTH} characters`
)
}
debug('SemVer', version, options)
this.options = options
this.loose = !!options.loose
// this isn't actually relevant for versions, but keep it so that we
// don't run into trouble passing this.options around.
this.includePrerelease = !!options.includePrerelease
const m = version.trim().match(options.loose ? re[t.LOOSE] : re[t.FULL])
if (!m) {
throw new TypeError(`Invalid Version: ${version}`)
}
this.raw = version
// these are actually numbers
this.major = +m[1]
this.minor = +m[2]
this.patch = +m[3]
if (this.major > MAX_SAFE_INTEGER || this.major < 0) {
throw new TypeError('Invalid major version')
}
if (this.minor > MAX_SAFE_INTEGER || this.minor < 0) {
throw new TypeError('Invalid minor version')
}
if (this.patch > MAX_SAFE_INTEGER || this.patch < 0) {
throw new TypeError('Invalid patch version')
}
// numberify any prerelease numeric ids
if (!m[4]) {
this.prerelease = []
} else {
this.prerelease = m[4].split('.').map((id) => {
if (/^[0-9]+$/.test(id)) {
const num = +id
if (num >= 0 && num < MAX_SAFE_INTEGER) {
return num
}
}
return id
})
}
this.build = m[5] ? m[5].split('.') : []
this.format()
}
format () {
this.version = `${this.major}.${this.minor}.${this.patch}`
if (this.prerelease.length) {
this.version += `-${this.prerelease.join('.')}`
}
return this.version
}
toString () {
return this.version
}
compare (other) {
debug('SemVer.compare', this.version, this.options, other)
if (!(other instanceof SemVer)) {
if (typeof other === 'string' && other === this.version) {
return 0
}
other = new SemVer(other, this.options)
}
if (other.version === this.version) {
return 0
}
return this.compareMain(other) || this.comparePre(other)
}
compareMain (other) {
if (!(other instanceof SemVer)) {
other = new SemVer(other, this.options)
}
return (
compareIdentifiers(this.major, other.major) ||
compareIdentifiers(this.minor, other.minor) ||
compareIdentifiers(this.patch, other.patch)
)
}
comparePre (other) {
if (!(other instanceof SemVer)) {
other = new SemVer(other, this.options)
}
// NOT having a prerelease is > having one
if (this.prerelease.length && !other.prerelease.length) {
return -1
} else if (!this.prerelease.length && other.prerelease.length) {
return 1
} else if (!this.prerelease.length && !other.prerelease.length) {
return 0
}
let i = 0
do {
const a = this.prerelease[i]
const b = other.prerelease[i]
debug('prerelease compare', i, a, b)
if (a === undefined && b === undefined) {
return 0
} else if (b === undefined) {
return 1
} else if (a === undefined) {
return -1
} else if (a === b) {
continue
} else {
return compareIdentifiers(a, b)
}
} while (++i)
}
compareBuild (other) {
if (!(other instanceof SemVer)) {
other = new SemVer(other, this.options)
}
let i = 0
do {
const a = this.build[i]
const b = other.build[i]
      debug('build compare', i, a, b)
if (a === undefined && b === undefined) {
return 0
} else if (b === undefined) {
return 1
} else if (a === undefined) {
return -1
} else if (a === b) {
continue
} else {
return compareIdentifiers(a, b)
}
} while (++i)
}
// preminor will bump the version up to the next minor release, and immediately
// down to pre-release. premajor and prepatch work the same way.
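  // For example (illustrative): new SemVer('1.2.3').inc('preminor').version === '1.3.0-0'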
inc (release, identifier, identifierBase) {
switch (release) {
case 'premajor':
this.prerelease.length = 0
this.patch = 0
this.minor = 0
this.major++
this.inc('pre', identifier, identifierBase)
break
case 'preminor':
this.prerelease.length = 0
this.patch = 0
this.minor++
this.inc('pre', identifier, identifierBase)
break
case 'prepatch':
// If this is already a prerelease, it will bump to the next version
// drop any prereleases that might already exist, since they are not
// relevant at this point.
this.prerelease.length = 0
this.inc('patch', identifier, identifierBase)
this.inc('pre', identifier, identifierBase)
break
// If the input is a non-prerelease version, this acts the same as
// prepatch.
case 'prerelease':
if (this.prerelease.length === 0) {
this.inc('patch', identifier, identifierBase)
}
this.inc('pre', identifier, identifierBase)
break
case 'major':
// If this is a pre-major version, bump up to the same major version.
// Otherwise increment major.
// 1.0.0-5 bumps to 1.0.0
// 1.1.0 bumps to 2.0.0
if (
this.minor !== 0 ||
this.patch !== 0 ||
this.prerelease.length === 0
) {
this.major++
}
this.minor = 0
this.patch = 0
this.prerelease = []
break
case 'minor':
// If this is a pre-minor version, bump up to the same minor version.
// Otherwise increment minor.
// 1.2.0-5 bumps to 1.2.0
// 1.2.1 bumps to 1.3.0
if (this.patch !== 0 || this.prerelease.length === 0) {
this.minor++
}
this.patch = 0
this.prerelease = []
break
case 'patch':
// If this is not a pre-release version, it will increment the patch.
// If it is a pre-release it will bump up to the same patch version.
// 1.2.0-5 patches to 1.2.0
// 1.2.0 patches to 1.2.1
if (this.prerelease.length === 0) {
this.patch++
}
this.prerelease = []
break
// This probably shouldn't be used publicly.
// 1.0.0 'pre' would become 1.0.0-0 which is the wrong direction.
case 'pre': {
const base = Number(identifierBase) ? 1 : 0
if (!identifier && identifierBase === false) {
throw new Error('invalid increment argument: identifier is empty')
}
if (this.prerelease.length === 0) {
this.prerelease = [base]
} else {
let i = this.prerelease.length
while (--i >= 0) {
if (typeof this.prerelease[i] === 'number') {
this.prerelease[i]++
i = -2
}
}
if (i === -1) {
// didn't increment anything
if (identifier === this.prerelease.join('.') && identifierBase === false) {
throw new Error('invalid increment argument: identifier already exists')
}
this.prerelease.push(base)
}
}
if (identifier) {
// 1.2.0-beta.1 bumps to 1.2.0-beta.2,
// 1.2.0-beta.fooblz or 1.2.0-beta bumps to 1.2.0-beta.0
let prerelease = [identifier, base]
if (identifierBase === false) {
prerelease = [identifier]
}
if (compareIdentifiers(this.prerelease[0], identifier) === 0) {
if (isNaN(this.prerelease[1])) {
this.prerelease = prerelease
}
} else {
this.prerelease = prerelease
}
}
break
}
default:
throw new Error(`invalid increment argument: ${release}`)
}
this.raw = this.format()
if (this.build.length) {
this.raw += `+${this.build.join('.')}`
}
return this
}
}
module.exports = SemVer | PypiClean |
/JLpy_utils_package-2.0.5-py3-none-any.whl/JLpy_utils_package/img.py | import numpy as np
import os  # used by decompose_video_to_img for path handling
import skimage, skimage.transform, skimage.restoration, skimage.measure, skimage.feature
import matplotlib.pyplot as plt
import matplotlib as mpl
import sys
try:
import cv2
except ImportError:
sys.exit("""You need cv2. run: '!pip install opencv-python' """)
# from transform import rescale, resize, downscale_local_mean
def resize_img(img, y_size, x_size):
return skimage.transform.resize(img, (y_size,x_size),mode= 'reflect')
def denoise_img(img):
return skimage.restoration.denoise_tv_chambolle(img)
def build_crop_array(img,yx_min,yx_max,padding, use_square = False):
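    """Build a [y_min, y_max, x_min, x_max] crop array from corner points,
    clamping the padded bounds to the image shape; optionally adjust the
    box towards a square using the mean side length."""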
y_min_index = max(yx_min[0]-padding,0)
x_min_index = max(yx_min[1]-padding,0)
y_max_index = min(yx_max[0]+padding,img.shape[0])
x_max_index = min(yx_max[1]+padding,img.shape[1])
crop_array = [y_min_index, y_max_index, x_min_index, x_max_index]
if use_square:
mean_width = np.mean((crop_array[1]-crop_array[0],crop_array[3]-crop_array[2]))
x_offset = mean_width - (crop_array[1]-crop_array[0])
y_offset = mean_width - (crop_array[3]-crop_array[2])
crop_array[0] = crop_array[0]-int(x_offset/2)
crop_array[1] = crop_array[1]+int(x_offset/2)
crop_array[2] = crop_array[2]-int(y_offset/2)
crop_array[3] = crop_array[3]+int(y_offset/2)
return crop_array
def find_img_contours_and_cropping_array(img, contour_level = 0.1, padding = 50, use_square = False):
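    """Find contours at the given level and derive a padded cropping array
    ([y_min, y_max, x_min, x_max]) from their bounding corners; fall back
    to the full image if no contours are found."""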
# Find contours
contours = skimage.measure.find_contours(img, level = contour_level)
if contours == []:
yx_max = [img.shape[0]-1,img.shape[1]-1]
yx_min = [0,0]
else:
#get corner indices
yx_max = np.array([[contours[i][:, 0].max(), contours[i][:, 1].max()] for i in range(len(contours))])
yx_max = [int(yx_max[:,0].max()),int(yx_max[:,1].max())]
yx_min = np.array([[contours[i][:, 0].min(), contours[i][:, 1].min()] for i in range(len(contours))])
yx_min = [int(yx_min[:,0].min()), int(yx_min[:,1].min())]
#Build Cropping array
crop_array = build_crop_array(img, yx_min, yx_max, padding, use_square = use_square)
return contours, crop_array
def preprocess_img(img,
y_size_resize1 = 512,
y_size_resize2 = 256,
plot_steps = False
):
if plot_steps == True:
plt.subplot(421)
plt.imshow(img)
plt.title('(1) original img')
#fetch gray scale img
img_gray = skimage.color.rgb2gray(img)
if plot_steps == True:
plt.subplot(422)
plt.imshow(img_gray, cmap = 'binary')
plt.title('(2) gray img')
#perform 1st resize
y_size = y_size_resize1
img_resized = resize_img(img,
y_size=y_size,
x_size = int(y_size*img.shape[1]/img.shape[0]))
img_gray_resized = resize_img(img_gray,
y_size=y_size,
x_size = int(y_size*img.shape[1]/img.shape[0]))
if plot_steps == True:
plt.subplot(423)
plt.imshow(img_gray_resized,cmap='binary')
plt.title('(3) scaled for y_size = '+str(y_size))
#denoise img
img_gray_resized_denoised = denoise_img(img_gray_resized)
if plot_steps == True:
plt.subplot(424)
plt.imshow(img_gray_resized_denoised,cmap = 'binary')
plt.title('(4) denoised img')
contours, crop_array = find_img_contours_and_cropping_array(img_gray_resized_denoised,
contour_level = 0.1,
padding = 50)
if plot_steps == True:
plt.subplot(425)
plt.imshow(img_gray_resized_denoised, interpolation='nearest', cmap='binary')
for n, contour in enumerate(contours):
plt.plot(contour[:, 1], contour[:, 0], linewidth=1, color = 'r')
plt.plot(crop_array[2:4],crop_array[0:2],'bo')
plt.title('(5) Cropping pts: '+str(crop_array))
#crop images
img_gray_resized_cropped = img_gray_resized[crop_array[0]:crop_array[1],crop_array[2]:crop_array[3]]
img_resized_cropped = img_resized[crop_array[0]:crop_array[1],crop_array[2]:crop_array[3]]
if plot_steps == True:
plt.subplot(426)
plt.imshow(img_resized_cropped)
plt.title('(6) cropped img')
#resize the cropped image
y_size = y_size_resize2
img_resized_cropped_resized = resize_img(img_resized_cropped,
y_size=y_size,
x_size = y_size)
if plot_steps == True:
plt.subplot(427)
plt.imshow(img_resized_cropped_resized)
plt.title('(7 (final)) resized for xy_size = '+str(y_size))
return img_resized_cropped_resized
class auto_crop():
"""
This class contains helper functions for autocropping and image
"""
def use_countours(img,
padding = 50,
show_plots = {'processed':True,
'processing steps':False},
use_square=False,
contour_level_max_offset_scalar = 2):
"""
        Wrapper to make img cropping simpler. The function converts the img to grayscale, runs the "find_img_contours_and_cropping_array" function, and applies the cropping to the original img (RGB) via img_cropped = img[crop_array[0]:crop_array[1],crop_array[2]:crop_array[3]]. img_cropped is then returned.
"""
img = img/img.max()
img_gray = skimage.color.rgb2gray(img)
img_gray = img_gray/img_gray.max()
contour_level = img_gray.max()/contour_level_max_offset_scalar
contours, crop_array = find_img_contours_and_cropping_array(img_gray,
contour_level = contour_level,
padding = padding,
use_square = use_square)
img_cropped = img[crop_array[0]:crop_array[1],crop_array[2]:crop_array[3]]
if show_plots['processing steps']:
#original image
            plt.title('original img')
plt.imshow(img)
plt.grid(which='both')
plt.axis('off')
plt.show()
#gray with cropping points and contours
plt.imshow(img_gray, interpolation='nearest', cmap='binary')
for n, contour in enumerate(contours):
plt.plot(contour[:, 1], contour[:, 0], linewidth=1, color = 'r')
plt.plot(crop_array[2:4],crop_array[0:2],'bo')
plt.title('Cropping pts: '+str(crop_array))
plt.grid(which='both')
plt.axis('off')
plt.show()
if show_plots['processed']:
plt.title(img.shape)
plt.imshow(img_cropped)
plt.grid(which='both')
plt.axis('off')
plt.show()
return img_cropped
def use_edges(img,
edges_dict = {'sigma':10,
'low_threshold':None,
'high_threshold':None},
padding = (0,0),
show_plots = False,
verbose = 0):
"""
Use skimage.feature.canny method to find edges in the img passed. prior to edge finding, the img is converted to grayscale.
Arguments:
img: RGB img
edges_dict: dictionary containing 'sigma', 'low_threshold', 'high_threshold' settings passed to the canny edge detection method.
padding: # of pixels you want to pad on the edges found by the canny edge filter
show_plots: boolean to show or not show plots
verbose: integer. Higher value will print more processing statements/info.
Returns:
img_cropped: RGB img with cropping applied
img_cropped_gray: grayscale image with cropping applied.
"""
# instantiate img plot
if show_plots:
fig, ax_list = plt.subplots(1,4)
i=0
ax_list[i].set_title('original img')
ax_list[i].imshow(img)
ax_list[i].grid(which='both', visible=False)
ax_list[i].axis('off')
i+=1
#convert to grayscale
img_gray = skimage.color.rgb2gray(img)
img_gray = img_gray/np.mean(img_gray.flatten()) # mean normalized
if show_plots:
ax_list[i].set_title('grayscale img')
ax_list[i].imshow(img_gray)
ax_list[i].grid(which='both', visible=False)
ax_list[i].axis('off')
i+=1
#find edges
edges = skimage.feature.canny(img_gray,
sigma = edges_dict['sigma'],
low_threshold = edges_dict['low_threshold'],
high_threshold = edges_dict['high_threshold'],
mask=None,
use_quantiles=False)
if show_plots:
ax_list[i].set_title('edges')
ax_list[i].imshow(edges)
ax_list[i].grid(which='both', visible=False)
ax_list[i].axis('off')
i+=1
#fetch indices of coner edges
edge_indices = np.where(edges==True)
if edge_indices[0].shape[0] != 0 and edge_indices[1].shape[0] != 0 :
ylim = (np.min(edge_indices[0])-padding[0],np.max(edge_indices[0])+padding[0])
xlim = (np.min(edge_indices[1])-padding[1],np.max(edge_indices[1])+padding[1])
#plot cropped image
img_cropped = img[ylim[0]:ylim[1], xlim[0]:xlim[1],:]
img_cropped_gray = img_gray[ylim[0]:ylim[1], xlim[0]:xlim[1]]
else:
img_cropped = img
img_cropped_gray = img_gray
if show_plots:
ax_list[i].set_title('cropped img')
ax_list[i].imshow(img_cropped)
ax_list[i].grid(which='both', visible=False)
ax_list[i].axis('off')
i+=1
if show_plots:
fig.tight_layout(rect=(0,0,3,1))
plt.show()
else:
plt.close()
if verbose>=1:
print('img.shape:',img.shape)
print('img_cropped.shape',img_cropped.shape)
print('img reduction factor:', np.prod(img.shape)/np.prod(img_cropped.shape))
return img_cropped, img_cropped_gray
def autocrop_and_downscale(img, target_min_dim = 256, verbose = 0):
"""
        Apply edges-based autocropping and downscale using local mean to reduce the min dimension of an image to be equal to the 'target_min_dim' argument
Arguments:
img: RGB or gray-scale
        target_min_dim: integer. default: 256. min dimension for the output image. If the image is rectangular, the longer axis will be scaled by the same amount as the shorter axis such that the output image is not distorted.
verbose: integer. default: 0. verbosity of print statements
Returns:
img_autocrop_downscale: RGB image
"""
img_autocrop, _ = auto_crop.use_edges(img, show_plots = False, verbose=0)
#fetch xy dimensions of autocropped image
dims = list(img_autocrop.shape)[:2]
#calculate downscale factors
if len(img.shape)==3:
downscale_factors = (int(np.min(dims)/target_min_dim), int(np.min(dims)/target_min_dim), 1)
else:
downscale_factors = (int(np.min(dims)/target_min_dim), int(np.min(dims)/target_min_dim))
if img_autocrop.max()>1:
img_autocrop = img_autocrop/255
#downscale img
img_autocrop_downscale = skimage.transform.downscale_local_mean(img_autocrop, downscale_factors)
if verbose>=1:
print('img.shape:',img.shape)
print('img_autocrop.shape:',img_autocrop.shape)
print('img_autocrop_downscale.shape:',img_autocrop_downscale.shape)
print('img size reduction factor:', round(np.prod(img.shape)/np.prod(img_autocrop_downscale.shape),0))
return img_autocrop_downscale
def decompose_video_to_img(path_video,
show_plots = True,
verbose = 1):
if verbose>=1:
print(os.path.split(path_video)[1])
#fetch video object
cap = cv2.VideoCapture(path_video)
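    # numeric ids match OpenCV's cv2.CAP_PROP_* constants
    # (3: FRAME_WIDTH, 4: FRAME_HEIGHT, 5: FPS, 7: FRAME_COUNT, 16: CONVERT_RGB)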
propid_dict = {'frame_width':3,
'frame_height':4,
'fps':5,
'frame_count':7,
'convert_to_RGB':16}
prop_dict = {}
for key in propid_dict.keys():
prop_dict[key] = cap.get(propid_dict[key])
if verbose>=1: print('\t',key,':', prop_dict[key])
#make subfolder for frames
    path_frames_folder = os.path.splitext(path_video)[0]
if os.path.isdir(path_frames_folder)==False:
os.makedirs(path_frames_folder)
if show_plots: #instantiate plots
fig, ax_list = plt.subplots(1,5)
p=0
#build dummy img
img_dummy = np.zeros((int(prop_dict['frame_height']),int(prop_dict['frame_width']),3)).astype(int)+255
for i in range(int(prop_dict['frame_count'])):
retval, img = cap.read()
#check if the video is encoded as RGB
if bool(prop_dict['convert_to_RGB'])==False:
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
#save the img
filename = 'frame_'+str(i)+'.png'
path_file = os.path.join(path_frames_folder,filename)
cv2.imwrite(path_file,img)
if i%round(prop_dict['fps'])==0: #only show frame approx every second
if show_plots:
ax_list[p].imshow(img)
ax_list[p].grid(which='both',visible=False)
ax_list[p].axis('off')
if p+1>len(ax_list)-1:
p=0
fig.tight_layout(rect=(0,0,2.5,1))
plt.show()
fig, ax_list = plt.subplots(1,5)
for ax in ax_list: #fill in dummy imgs to prevent irregular formatting at end of frame list
ax.imshow(img_dummy)
ax.grid(which='both',visible=False)
ax.axis('off')
else: p+=1
fig.tight_layout(rect=(0,0,2.5,1))
plt.show()
cap.release()
cv2.destroyAllWindows() | PypiClean |
/Housing_Price_Prediction-0.4-py3-none-any.whl/house_pricing/ingest_data.py | """
This module contains helper functions for ingestion of data.
Running this standalone downloads the housing data and stores preprocessed copies of it in the specified folders.
"""
import os
import tarfile
from argparse import ArgumentParser, Namespace
from logging import Logger
import numpy as np
import pandas as pd
from six.moves import urllib
from sklearn.impute import SimpleImputer
from sklearn.model_selection import StratifiedShuffleSplit
from housing_price.logger import configure_logger
def parse_args() -> Namespace:
"""Commandline argument parser for standalone run.
Returns
-------
arparse.Namespace
Commandline arguments. Contains keys: ["raw": str,
"processed": str,
"log_level": str,
"no_console_log": bool,
"log_path": str]
"""
parser = ArgumentParser()
parser.add_argument(
"-r",
"--raw",
type=str,
default="data/raw/",
help="Path to raw dataset.",
)
parser.add_argument(
"-p",
"--processed",
type=str,
default="data/processed/",
help="Path to processed dataset.",
)
parser.add_argument("--log-level", type=str, default="DEBUG")
parser.add_argument("--no-console-log", action="store_true")
parser.add_argument("--log-path", type=str, default="")
return parser.parse_args()
def fetch_housing_data(housing_url: str, housing_path: str) -> None:
"""Function to download and extract housing data.
Parameters
----------
housing_url : str
Url to download the housing data from.
housing_path : str
Path to store the raw csv files after extraction.
"""
os.makedirs(housing_path, exist_ok=True)
tgz_path = os.path.join(housing_path, "housing.tgz")
urllib.request.urlretrieve(housing_url, tgz_path)
housing_tgz = tarfile.open(tgz_path)
housing_tgz.extractall(path=housing_path)
housing_tgz.close()
os.remove(tgz_path)
def stratified_shuffle_split(
base_df: pd.DataFrame,
) -> tuple[pd.DataFrame, pd.DataFrame]:
"""Does stratified shuffle split on "income_cat" attribute of housing data.
Parameters
----------
base_df : pd.DataFrame
The dataframe to be split.
Returns
-------
tuple[pd.DataFrame, pd.DataFrame]
[train_dataset, test_dataset]
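
    Examples
    --------
    A hypothetical call on the raw housing dataframe:

    >>> train_set, test_set = stratified_shuffle_split(housing_df)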
"""
base_df["income_cat"] = pd.cut(
base_df["median_income"],
bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],
labels=[1, 2, 3, 4, 5],
)
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(base_df, base_df["income_cat"]):
strat_train_set = base_df.loc[train_index]
strat_test_set = base_df.loc[test_index]
for set_ in (strat_test_set, strat_train_set):
set_.drop("income_cat", axis=1, inplace=True)
return (strat_train_set, strat_test_set)
def pre_process_data(
df: pd.DataFrame, imputer: SimpleImputer = None
) -> tuple[pd.DataFrame, SimpleImputer]:
"""Preprocesses the given dataframe. Imputes missing values with median.
Replaces categorical column "ocean_proximity" with onehot dummy variables.
Parameters
----------
df : pd.DataFrame
Dataframe to preprocess.
imputer : SimpleImputer, optional
Imputer that imputes missing values, by default None.
If None, new imputer is created and fit to the given dataframe.
Returns
-------
tuple[pd.DataFrame, SimpleImputer]
Index 0 is the preprocessed dataframe.
Index 1 is the SimpleImputer passed or fit on the dataframe if None is passed.
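
    Examples
    --------
    Fit the imputer on the train split, then reuse it on the test split,
    mirroring the usage in ``run`` below:

    >>> train_set, imputer = pre_process_data(train_set)
    >>> test_set, _ = pre_process_data(test_set, imputer)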
"""
df = pd.get_dummies(df, columns=["ocean_proximity"])
if imputer is None:
imputer = SimpleImputer(strategy="median")
imputer.fit(df)
data = imputer.transform(df)
df = pd.DataFrame(data, columns=df.columns, index=df.index)
df["rooms_per_household"] = df["total_rooms"] / df["households"]
df["bedrooms_per_room"] = df["total_bedrooms"] / df["total_rooms"]
df["population_per_household"] = df["population"] / df["households"]
return (df, imputer)
def run(args: Namespace, logger: Logger) -> None:
"""Does all the ingesting work (fetching, splitting, preprocessing).
Gets called if this module is run standalone.
Parameters
----------
args : Namespace
Commandline arguments from parse_args.
logger : Logger
Logger to log the state while running.
"""
DOWNLOAD_ROOT = (
"https://raw.githubusercontent.com/ageron/handson-ml/master/"
)
HOUSING_PATH = args.raw
HOUSING_URL = DOWNLOAD_ROOT + "datasets/housing/housing.tgz"
fetch_housing_data(HOUSING_URL, HOUSING_PATH)
logger.debug("Fetched housing data.")
housing_df = pd.read_csv(os.path.join(args.raw, "housing.csv"))
train_set, test_set = stratified_shuffle_split(housing_df)
logger.debug("Preprocessing...")
train_set, imputer = pre_process_data(train_set)
test_set, _ = pre_process_data(test_set, imputer)
logger.debug("Preprocessing finished.")
logger.debug("Saving datasets.")
os.makedirs(args.processed, exist_ok=True)
train_path = os.path.join(args.processed, "housing_train.csv")
train_set.to_csv(train_path)
logger.debug(f"Preprocessed train datasets stored at {train_path}.")
test_path = os.path.join(args.processed, "housing_test.csv")
test_set.to_csv(test_path)
logger.debug(f"Preprocessed test datasets stored at {test_path}.")
if __name__ == "__main__":
args = parse_args()
logger = configure_logger(
log_level=args.log_level,
log_file=args.log_path,
console=not args.no_console_log,
)
run(args, logger) | PypiClean |
/MetaCalls-0.0.5-cp310-cp310-manylinux2014_x86_64.whl/metacalls/node_modules/typescript/lib/protocol.d.ts | declare namespace ts.server.protocol {
const enum CommandTypes {
JsxClosingTag = "jsxClosingTag",
Brace = "brace",
BraceCompletion = "braceCompletion",
GetSpanOfEnclosingComment = "getSpanOfEnclosingComment",
Change = "change",
Close = "close",
/** @deprecated Prefer CompletionInfo -- see comment on CompletionsResponse */
Completions = "completions",
CompletionInfo = "completionInfo",
CompletionDetails = "completionEntryDetails",
CompileOnSaveAffectedFileList = "compileOnSaveAffectedFileList",
CompileOnSaveEmitFile = "compileOnSaveEmitFile",
Configure = "configure",
Definition = "definition",
DefinitionAndBoundSpan = "definitionAndBoundSpan",
Implementation = "implementation",
Exit = "exit",
FileReferences = "fileReferences",
Format = "format",
Formatonkey = "formatonkey",
Geterr = "geterr",
GeterrForProject = "geterrForProject",
SemanticDiagnosticsSync = "semanticDiagnosticsSync",
SyntacticDiagnosticsSync = "syntacticDiagnosticsSync",
SuggestionDiagnosticsSync = "suggestionDiagnosticsSync",
NavBar = "navbar",
Navto = "navto",
NavTree = "navtree",
NavTreeFull = "navtree-full",
/** @deprecated */
Occurrences = "occurrences",
DocumentHighlights = "documentHighlights",
Open = "open",
Quickinfo = "quickinfo",
References = "references",
Reload = "reload",
Rename = "rename",
Saveto = "saveto",
SignatureHelp = "signatureHelp",
FindSourceDefinition = "findSourceDefinition",
Status = "status",
TypeDefinition = "typeDefinition",
ProjectInfo = "projectInfo",
ReloadProjects = "reloadProjects",
Unknown = "unknown",
OpenExternalProject = "openExternalProject",
OpenExternalProjects = "openExternalProjects",
CloseExternalProject = "closeExternalProject",
UpdateOpen = "updateOpen",
GetOutliningSpans = "getOutliningSpans",
TodoComments = "todoComments",
Indentation = "indentation",
DocCommentTemplate = "docCommentTemplate",
CompilerOptionsForInferredProjects = "compilerOptionsForInferredProjects",
GetCodeFixes = "getCodeFixes",
GetCombinedCodeFix = "getCombinedCodeFix",
ApplyCodeActionCommand = "applyCodeActionCommand",
GetSupportedCodeFixes = "getSupportedCodeFixes",
GetApplicableRefactors = "getApplicableRefactors",
GetEditsForRefactor = "getEditsForRefactor",
OrganizeImports = "organizeImports",
GetEditsForFileRename = "getEditsForFileRename",
ConfigurePlugin = "configurePlugin",
SelectionRange = "selectionRange",
ToggleLineComment = "toggleLineComment",
ToggleMultilineComment = "toggleMultilineComment",
CommentSelection = "commentSelection",
UncommentSelection = "uncommentSelection",
PrepareCallHierarchy = "prepareCallHierarchy",
ProvideCallHierarchyIncomingCalls = "provideCallHierarchyIncomingCalls",
ProvideCallHierarchyOutgoingCalls = "provideCallHierarchyOutgoingCalls",
ProvideInlayHints = "provideInlayHints"
}
/**
* A TypeScript Server message
*/
interface Message {
/**
* Sequence number of the message
*/
seq: number;
/**
* One of "request", "response", or "event"
*/
type: "request" | "response" | "event";
}
/**
* Client-initiated request message
*/
interface Request extends Message {
type: "request";
/**
* The command to execute
*/
command: string;
/**
* Object containing arguments for the command
*/
arguments?: any;
}
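    /**
     * Illustrative example (hypothetical file and position, not part of the
     * protocol definition): a "quickinfo" request and its response on the wire.
     *
     *   // client -> server
     *   { "seq": 1, "type": "request", "command": "quickinfo",
     *     "arguments": { "file": "/home/user/app.ts", "line": 10, "offset": 4 } }
     *
     *   // server -> client
     *   { "seq": 0, "type": "response", "request_seq": 1, "command": "quickinfo",
     *     "success": true, "body": { ... } }
     */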
/**
* Request to reload the project structure for all the opened files
*/
interface ReloadProjectsRequest extends Message {
command: CommandTypes.ReloadProjects;
}
/**
* Server-initiated event message
*/
interface Event extends Message {
type: "event";
/**
* Name of event
*/
event: string;
/**
* Event-specific information
*/
body?: any;
}
/**
* Response by server to client request message.
*/
interface Response extends Message {
type: "response";
/**
* Sequence number of the request message.
*/
request_seq: number;
/**
* Outcome of the request.
*/
success: boolean;
/**
* The command requested.
*/
command: string;
/**
* If success === false, this should always be provided.
* Otherwise, may (or may not) contain a success message.
*/
message?: string;
/**
* Contains message body if success === true.
*/
body?: any;
/**
* Contains extra information that plugin can include to be passed on
*/
metadata?: unknown;
/**
* Exposes information about the performance of this request-response pair.
*/
performanceData?: PerformanceData;
}
interface PerformanceData {
/**
* Time spent updating the program graph, in milliseconds.
*/
updateGraphDurationMs?: number;
/**
* The time spent creating or updating the auto-import program, in milliseconds.
*/
createAutoImportProviderProgramDurationMs?: number;
}
/**
* Arguments for FileRequest messages.
*/
interface FileRequestArgs {
/**
* The file for the request (absolute pathname required).
*/
file: string;
projectFileName?: string;
}
interface StatusRequest extends Request {
command: CommandTypes.Status;
}
interface StatusResponseBody {
/**
* The TypeScript version (`ts.version`).
*/
version: string;
}
/**
* Response to StatusRequest
*/
interface StatusResponse extends Response {
body: StatusResponseBody;
}
/**
* Requests a JS Doc comment template for a given position
*/
interface DocCommentTemplateRequest extends FileLocationRequest {
command: CommandTypes.DocCommentTemplate;
}
/**
* Response to DocCommentTemplateRequest
*/
interface DocCommandTemplateResponse extends Response {
body?: TextInsertion;
}
/**
* A request to get TODO comments from the file
*/
interface TodoCommentRequest extends FileRequest {
command: CommandTypes.TodoComments;
arguments: TodoCommentRequestArgs;
}
/**
* Arguments for TodoCommentRequest request.
*/
interface TodoCommentRequestArgs extends FileRequestArgs {
/**
* Array of target TodoCommentDescriptors that describes TODO comments to be found
*/
descriptors: TodoCommentDescriptor[];
}
/**
* Response for TodoCommentRequest request.
*/
interface TodoCommentsResponse extends Response {
body?: TodoComment[];
}
/**
* A request to determine if the caret is inside a comment.
*/
interface SpanOfEnclosingCommentRequest extends FileLocationRequest {
command: CommandTypes.GetSpanOfEnclosingComment;
arguments: SpanOfEnclosingCommentRequestArgs;
}
interface SpanOfEnclosingCommentRequestArgs extends FileLocationRequestArgs {
/**
* Requires that the enclosing span be a multi-line comment, or else the request returns undefined.
*/
onlyMultiLine: boolean;
}
/**
* Request to obtain outlining spans in file.
*/
interface OutliningSpansRequest extends FileRequest {
command: CommandTypes.GetOutliningSpans;
}
interface OutliningSpan {
/** The span of the document to actually collapse. */
textSpan: TextSpan;
/** The span of the document to display when the user hovers over the collapsed span. */
hintSpan: TextSpan;
/** The text to display in the editor for the collapsed region. */
bannerText: string;
/**
* Whether or not this region should be automatically collapsed when
* the 'Collapse to Definitions' command is invoked.
*/
autoCollapse: boolean;
/**
* Classification of the contents of the span
*/
kind: OutliningSpanKind;
}
/**
* Response to OutliningSpansRequest request.
*/
interface OutliningSpansResponse extends Response {
body?: OutliningSpan[];
}
/**
* A request to get indentation for a location in file
*/
interface IndentationRequest extends FileLocationRequest {
command: CommandTypes.Indentation;
arguments: IndentationRequestArgs;
}
/**
* Response for IndentationRequest request.
*/
interface IndentationResponse extends Response {
body?: IndentationResult;
}
/**
* Indentation result representing where indentation should be placed
*/
interface IndentationResult {
/**
* The base position in the document that the indent should be relative to
*/
position: number;
/**
* The number of columns the indent should be at relative to the position's column.
*/
indentation: number;
}
/**
* Arguments for IndentationRequest request.
*/
interface IndentationRequestArgs extends FileLocationRequestArgs {
/**
* An optional set of settings to be used when computing indentation.
* If argument is omitted - then it will use settings for file that were previously set via 'configure' request or global settings.
*/
options?: EditorSettings;
}
/**
* Arguments for ProjectInfoRequest request.
*/
interface ProjectInfoRequestArgs extends FileRequestArgs {
/**
* Indicate if the file name list of the project is needed
*/
needFileNameList: boolean;
}
/**
* A request to get the project information of the current file.
*/
interface ProjectInfoRequest extends Request {
command: CommandTypes.ProjectInfo;
arguments: ProjectInfoRequestArgs;
}
/**
* A request to retrieve compiler options diagnostics for a project
*/
interface CompilerOptionsDiagnosticsRequest extends Request {
arguments: CompilerOptionsDiagnosticsRequestArgs;
}
/**
* Arguments for CompilerOptionsDiagnosticsRequest request.
*/
interface CompilerOptionsDiagnosticsRequestArgs {
/**
* Name of the project to retrieve compiler options diagnostics.
*/
projectFileName: string;
}
/**
* Response message body for "projectInfo" request
*/
interface ProjectInfo {
/**
* For configured project, this is the normalized path of the 'tsconfig.json' file
* For inferred project, this is undefined
*/
configFileName: string;
/**
* The list of normalized file name in the project, including 'lib.d.ts'
*/
fileNames?: string[];
/**
* Indicates if the project has a active language service instance
*/
languageServiceDisabled?: boolean;
}
/**
* Represents diagnostic info that includes location of diagnostic in two forms
* - start position and length of the error span
* - startLocation and endLocation - a pair of Location objects that store start/end line and offset of the error span.
*/
interface DiagnosticWithLinePosition {
message: string;
start: number;
length: number;
startLocation: Location;
endLocation: Location;
category: string;
code: number;
/** May store more in future. For now, this will simply be `true` to indicate when a diagnostic is an unused-identifier diagnostic. */
reportsUnnecessary?: {};
reportsDeprecated?: {};
relatedInformation?: DiagnosticRelatedInformation[];
}
/**
* Response message for "projectInfo" request
*/
interface ProjectInfoResponse extends Response {
body?: ProjectInfo;
}
/**
* Request whose sole parameter is a file name.
*/
interface FileRequest extends Request {
arguments: FileRequestArgs;
}
/**
* Instances of this interface specify a location in a source file:
* (file, line, character offset), where line and character offset are 1-based.
*/
interface FileLocationRequestArgs extends FileRequestArgs {
/**
* The line number for the request (1-based).
*/
line: number;
/**
* The character offset (on the line) for the request (1-based).
*/
offset: number;
}
type FileLocationOrRangeRequestArgs = FileLocationRequestArgs | FileRangeRequestArgs;
/**
* Request refactorings at a given position or selection area.
*/
interface GetApplicableRefactorsRequest extends Request {
command: CommandTypes.GetApplicableRefactors;
arguments: GetApplicableRefactorsRequestArgs;
}
type GetApplicableRefactorsRequestArgs = FileLocationOrRangeRequestArgs & {
triggerReason?: RefactorTriggerReason;
kind?: string;
};
type RefactorTriggerReason = "implicit" | "invoked";
/**
* Response is a list of available refactorings.
* Each refactoring exposes one or more "Actions"; a user selects one action to invoke a refactoring
*/
interface GetApplicableRefactorsResponse extends Response {
body?: ApplicableRefactorInfo[];
}
/**
* A set of one or more available refactoring actions, grouped under a parent refactoring.
*/
interface ApplicableRefactorInfo {
/**
* The programmatic name of the refactoring
*/
name: string;
/**
* A description of this refactoring category to show to the user.
* If the refactoring gets inlined (see below), this text will not be visible.
*/
description: string;
/**
* Inlineable refactorings can have their actions hoisted out to the top level
     * of a context menu. Non-inlineable refactorings should always be shown inside
* their parent grouping.
*
* If not specified, this value is assumed to be 'true'
*/
inlineable?: boolean;
actions: RefactorActionInfo[];
}
/**
* Represents a single refactoring action - for example, the "Extract Method..." refactor might
* offer several actions, each corresponding to a surround class or closure to extract into.
*/
interface RefactorActionInfo {
/**
* The programmatic name of the refactoring action
*/
name: string;
/**
* A description of this refactoring action to show to the user.
* If the parent refactoring is inlined away, this will be the only text shown,
* so this description should make sense by itself if the parent is inlineable=true
*/
description: string;
/**
* A message to show to the user if the refactoring cannot be applied in
* the current context.
*/
notApplicableReason?: string;
/**
* The hierarchical dotted name of the refactor action.
*/
kind?: string;
}
interface GetEditsForRefactorRequest extends Request {
command: CommandTypes.GetEditsForRefactor;
arguments: GetEditsForRefactorRequestArgs;
}
/**
* Request the edits that a particular refactoring action produces.
* Callers must specify the name of the refactor and the name of the action.
*/
type GetEditsForRefactorRequestArgs = FileLocationOrRangeRequestArgs & {
refactor: string;
action: string;
};
interface GetEditsForRefactorResponse extends Response {
body?: RefactorEditInfo;
}
interface RefactorEditInfo {
edits: FileCodeEdits[];
/**
* An optional location where the editor should start a rename operation once
* the refactoring edits have been applied
*/
renameLocation?: Location;
renameFilename?: string;
}
/**
* Organize imports by:
* 1) Removing unused imports
* 2) Coalescing imports from the same module
* 3) Sorting imports
*/
interface OrganizeImportsRequest extends Request {
command: CommandTypes.OrganizeImports;
arguments: OrganizeImportsRequestArgs;
}
type OrganizeImportsScope = GetCombinedCodeFixScope;
const enum OrganizeImportsMode {
All = "All",
SortAndCombine = "SortAndCombine",
RemoveUnused = "RemoveUnused"
}
interface OrganizeImportsRequestArgs {
scope: OrganizeImportsScope;
/** @deprecated Use `mode` instead */
skipDestructiveCodeActions?: boolean;
mode?: OrganizeImportsMode;
}
interface OrganizeImportsResponse extends Response {
body: readonly FileCodeEdits[];
}
interface GetEditsForFileRenameRequest extends Request {
command: CommandTypes.GetEditsForFileRename;
arguments: GetEditsForFileRenameRequestArgs;
}
/** Note: Paths may also be directories. */
interface GetEditsForFileRenameRequestArgs {
readonly oldFilePath: string;
readonly newFilePath: string;
}
interface GetEditsForFileRenameResponse extends Response {
body: readonly FileCodeEdits[];
}
/**
* Request for the available codefixes at a specific position.
*/
interface CodeFixRequest extends Request {
command: CommandTypes.GetCodeFixes;
arguments: CodeFixRequestArgs;
}
interface GetCombinedCodeFixRequest extends Request {
command: CommandTypes.GetCombinedCodeFix;
arguments: GetCombinedCodeFixRequestArgs;
}
interface GetCombinedCodeFixResponse extends Response {
body: CombinedCodeActions;
}
interface ApplyCodeActionCommandRequest extends Request {
command: CommandTypes.ApplyCodeActionCommand;
arguments: ApplyCodeActionCommandRequestArgs;
}
interface ApplyCodeActionCommandResponse extends Response {
}
interface FileRangeRequestArgs extends FileRequestArgs {
/**
* The line number for the request (1-based).
*/
startLine: number;
/**
* The character offset (on the line) for the request (1-based).
*/
startOffset: number;
/**
* The line number for the request (1-based).
*/
endLine: number;
/**
* The character offset (on the line) for the request (1-based).
*/
endOffset: number;
}
/**
* Instances of this interface specify errorcodes on a specific location in a sourcefile.
*/
interface CodeFixRequestArgs extends FileRangeRequestArgs {
/**
* Errorcodes we want to get the fixes for.
*/
errorCodes: readonly number[];
}
interface GetCombinedCodeFixRequestArgs {
scope: GetCombinedCodeFixScope;
fixId: {};
}
interface GetCombinedCodeFixScope {
type: "file";
args: FileRequestArgs;
}
interface ApplyCodeActionCommandRequestArgs {
/** May also be an array of commands. */
command: {};
}
/**
* Response for GetCodeFixes request.
*/
interface GetCodeFixesResponse extends Response {
body?: CodeAction[];
}
/**
* A request whose arguments specify a file location (file, line, col).
*/
interface FileLocationRequest extends FileRequest {
arguments: FileLocationRequestArgs;
}
/**
* A request to get codes of supported code fixes.
*/
interface GetSupportedCodeFixesRequest extends Request {
command: CommandTypes.GetSupportedCodeFixes;
}
/**
* A response for GetSupportedCodeFixesRequest request.
*/
interface GetSupportedCodeFixesResponse extends Response {
/**
* List of error codes supported by the server.
*/
body?: string[];
}
/**
* A request to get encoded semantic classifications for a span in the file
*/
interface EncodedSemanticClassificationsRequest extends FileRequest {
arguments: EncodedSemanticClassificationsRequestArgs;
}
/**
* Arguments for EncodedSemanticClassificationsRequest request.
*/
interface EncodedSemanticClassificationsRequestArgs extends FileRequestArgs {
/**
* Start position of the span.
*/
start: number;
/**
* Length of the span.
*/
length: number;
/**
* Optional parameter for the semantic highlighting response, if absent it
* defaults to "original".
*/
format?: "original" | "2020";
}
/** The response for a EncodedSemanticClassificationsRequest */
interface EncodedSemanticClassificationsResponse extends Response {
body?: EncodedSemanticClassificationsResponseBody;
}
/**
     * Encoded semantic classifications response message. Gives a series of text spans depending on the format argument.
*/
interface EncodedSemanticClassificationsResponseBody {
endOfLineState: EndOfLineState;
spans: number[];
}
/**
     * Arguments for the document highlight request; includes filesToSearch, file,
     * line, and offset.
*/
interface DocumentHighlightsRequestArgs extends FileLocationRequestArgs {
/**
* List of files to search for document highlights.
*/
filesToSearch: string[];
}
/**
* Go to definition request; value of command field is
* "definition". Return response giving the file locations that
* define the symbol found in file at location line, col.
*/
interface DefinitionRequest extends FileLocationRequest {
command: CommandTypes.Definition;
}
interface DefinitionAndBoundSpanRequest extends FileLocationRequest {
readonly command: CommandTypes.DefinitionAndBoundSpan;
}
interface FindSourceDefinitionRequest extends FileLocationRequest {
readonly command: CommandTypes.FindSourceDefinition;
}
interface DefinitionAndBoundSpanResponse extends Response {
readonly body: DefinitionInfoAndBoundSpan;
}
/**
* Go to type request; value of command field is
* "typeDefinition". Return response giving the file locations that
* define the type for the symbol found in file at location line, col.
*/
interface TypeDefinitionRequest extends FileLocationRequest {
command: CommandTypes.TypeDefinition;
}
/**
* Go to implementation request; value of command field is
* "implementation". Return response giving the file locations that
* implement the symbol found in file at location line, col.
*/
interface ImplementationRequest extends FileLocationRequest {
command: CommandTypes.Implementation;
}
/**
* Location in source code expressed as (one-based) line and (one-based) column offset.
*/
interface Location {
line: number;
offset: number;
}
/**
* Object found in response messages defining a span of text in source code.
*/
interface TextSpan {
/**
* First character of the definition.
*/
start: Location;
/**
* One character past last character of the definition.
*/
end: Location;
}
/**
* Object found in response messages defining a span of text in a specific source file.
*/
interface FileSpan extends TextSpan {
/**
* File containing text span.
*/
file: string;
}
interface JSDocTagInfo {
/** Name of the JSDoc tag */
name: string;
/**
* Comment text after the JSDoc tag -- the text after the tag name until the next tag or end of comment
* Display parts when UserPreferences.displayPartsForJSDoc is true, flattened to string otherwise.
*/
text?: string | SymbolDisplayPart[];
}
interface TextSpanWithContext extends TextSpan {
contextStart?: Location;
contextEnd?: Location;
}
interface FileSpanWithContext extends FileSpan, TextSpanWithContext {
}
interface DefinitionInfo extends FileSpanWithContext {
/**
* When true, the file may or may not exist.
*/
unverified?: boolean;
}
interface DefinitionInfoAndBoundSpan {
definitions: readonly DefinitionInfo[];
textSpan: TextSpan;
}
/**
* Definition response message. Gives text range for definition.
*/
interface DefinitionResponse extends Response {
body?: DefinitionInfo[];
}
interface DefinitionInfoAndBoundSpanResponse extends Response {
body?: DefinitionInfoAndBoundSpan;
}
/** @deprecated Use `DefinitionInfoAndBoundSpanResponse` instead. */
type DefinitionInfoAndBoundSpanReponse = DefinitionInfoAndBoundSpanResponse;
/**
* Definition response message. Gives text range for definition.
*/
interface TypeDefinitionResponse extends Response {
body?: FileSpanWithContext[];
}
/**
* Implementation response message. Gives text range for implementations.
*/
interface ImplementationResponse extends Response {
body?: FileSpanWithContext[];
}
/**
* Request to get brace completion for a location in the file.
*/
interface BraceCompletionRequest extends FileLocationRequest {
command: CommandTypes.BraceCompletion;
arguments: BraceCompletionRequestArgs;
}
/**
* Argument for BraceCompletionRequest request.
*/
interface BraceCompletionRequestArgs extends FileLocationRequestArgs {
/**
* Kind of opening brace
*/
openingBrace: string;
}
interface JsxClosingTagRequest extends FileLocationRequest {
readonly command: CommandTypes.JsxClosingTag;
readonly arguments: JsxClosingTagRequestArgs;
}
interface JsxClosingTagRequestArgs extends FileLocationRequestArgs {
}
interface JsxClosingTagResponse extends Response {
readonly body: TextInsertion;
}
/**
* @deprecated
* Get occurrences request; value of command field is
* "occurrences". Return response giving spans that are relevant
* in the file at a given line and column.
*/
interface OccurrencesRequest extends FileLocationRequest {
command: CommandTypes.Occurrences;
}
/** @deprecated */
interface OccurrencesResponseItem extends FileSpanWithContext {
/**
* True if the occurrence is a write location, false otherwise.
*/
isWriteAccess: boolean;
/**
* True if the occurrence is in a string, undefined otherwise;
*/
isInString?: true;
}
/** @deprecated */
interface OccurrencesResponse extends Response {
body?: OccurrencesResponseItem[];
}
/**
* Get document highlights request; value of command field is
* "documentHighlights". Return response giving spans that are relevant
* in the file at a given line and column.
*/
interface DocumentHighlightsRequest extends FileLocationRequest {
command: CommandTypes.DocumentHighlights;
arguments: DocumentHighlightsRequestArgs;
}
/**
     * Span augmented with extra information that denotes the kind of highlighting to be used for the span.
*/
interface HighlightSpan extends TextSpanWithContext {
kind: HighlightSpanKind;
}
/**
     * Represents a set of highlight spans for a given name
*/
interface DocumentHighlightsItem {
/**
* File containing highlight spans.
*/
file: string;
/**
* Spans to highlight in file.
*/
highlightSpans: HighlightSpan[];
}
/**
* Response for a DocumentHighlightsRequest request.
*/
interface DocumentHighlightsResponse extends Response {
body?: DocumentHighlightsItem[];
}
/**
* Find references request; value of command field is
* "references". Return response giving the file locations that
* reference the symbol found in file at location line, col.
*/
interface ReferencesRequest extends FileLocationRequest {
command: CommandTypes.References;
}
interface ReferencesResponseItem extends FileSpanWithContext {
/**
* Text of line containing the reference. Including this
* with the response avoids latency of editor loading files
* to show text of reference line (the server already has loaded the referencing files).
*
* If {@link UserPreferences.disableLineTextInReferences} is enabled, the property won't be filled
*/
lineText?: string;
/**
* True if reference is a write location, false otherwise.
*/
isWriteAccess: boolean;
/**
* Present only if the search was triggered from a declaration.
* True indicates that the references refers to the same symbol
* (i.e. has the same meaning) as the declaration that began the
* search.
*/
isDefinition?: boolean;
}
/**
* The body of a "references" response message.
*/
interface ReferencesResponseBody {
/**
* The file locations referencing the symbol.
*/
refs: readonly ReferencesResponseItem[];
/**
* The name of the symbol.
*/
symbolName: string;
/**
* The start character offset of the symbol (on the line provided by the references request).
*/
symbolStartOffset: number;
/**
* The full display name of the symbol.
*/
symbolDisplayString: string;
}
/**
* Response to "references" request.
*/
interface ReferencesResponse extends Response {
body?: ReferencesResponseBody;
}
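/*
 * Illustrative example (not part of the protocol definition): a "references"
 * request and an abridged response. Paths, positions and symbol data are
 * hypothetical.
 *
 *   { "seq": 3, "type": "request", "command": "references",
 *     "arguments": { "file": "/home/user/app.ts", "line": 4, "offset": 10 } }
 *
 *   { "seq": 0, "type": "response", "request_seq": 3, "success": true,
 *     "command": "references",
 *     "body": { "symbolName": "greet", "symbolStartOffset": 10,
 *               "symbolDisplayString": "function greet(name: string): void",
 *               "refs": [ { "file": "/home/user/app.ts",
 *                           "start": { "line": 4, "offset": 10 },
 *                           "end": { "line": 4, "offset": 15 },
 *                           "lineText": "function greet(name: string) {",
 *                           "isWriteAccess": true, "isDefinition": true } ] } }
 */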
interface FileReferencesRequest extends FileRequest {
command: CommandTypes.FileReferences;
}
interface FileReferencesResponseBody {
/**
* The file locations referencing the symbol.
*/
refs: readonly ReferencesResponseItem[];
/**
* The name of the symbol.
*/
symbolName: string;
}
interface FileReferencesResponse extends Response {
body?: FileReferencesResponseBody;
}
/**
* Argument for RenameRequest request.
*/
interface RenameRequestArgs extends FileLocationRequestArgs {
/**
* Should text at specified location be found/changed in comments?
*/
findInComments?: boolean;
/**
* Should text at specified location be found/changed in strings?
*/
findInStrings?: boolean;
}
/**
* Rename request; value of command field is "rename". Return
* response giving the file locations that reference the symbol
* found in file at location line, col. Also return full display
* name of the symbol so that client can print it unambiguously.
*/
interface RenameRequest extends FileLocationRequest {
command: CommandTypes.Rename;
arguments: RenameRequestArgs;
}
/**
* Information about the item to be renamed.
*/
type RenameInfo = RenameInfoSuccess | RenameInfoFailure;
interface RenameInfoSuccess {
/**
* True if item can be renamed.
*/
canRename: true;
/**
* File or directory to rename.
* If set, `getEditsForFileRename` should be called instead of `findRenameLocations`.
*/
fileToRename?: string;
/**
* Display name of the item to be renamed.
*/
displayName: string;
/**
* Full display name of item to be renamed.
*/
fullDisplayName: string;
/**
* The item's kind (such as 'className' or 'parameterName' or plain 'text').
*/
kind: ScriptElementKind;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers: string;
/** Span of text to rename. */
triggerSpan: TextSpan;
}
interface RenameInfoFailure {
canRename: false;
/**
* Error message if the item cannot be renamed.
*/
localizedErrorMessage: string;
}
/**
* A group of text spans, all in 'file'.
*/
interface SpanGroup {
/** The file to which the spans apply */
file: string;
/** The text spans in this group */
locs: RenameTextSpan[];
}
interface RenameTextSpan extends TextSpanWithContext {
readonly prefixText?: string;
readonly suffixText?: string;
}
interface RenameResponseBody {
/**
* Information about the item to be renamed.
*/
info: RenameInfo;
/**
* An array of span groups (one per file) that refer to the item to be renamed.
*/
locs: readonly SpanGroup[];
}
/**
* Rename response message.
*/
interface RenameResponse extends Response {
body?: RenameResponseBody;
}
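/*
 * Illustrative example (not part of the protocol definition): a "rename"
 * request, and how a client might use the result. Path and position are
 * hypothetical.
 *
 *   { "seq": 4, "type": "request", "command": "rename",
 *     "arguments": { "file": "/home/user/app.ts", "line": 4, "offset": 10,
 *                    "findInComments": false, "findInStrings": false } }
 *
 * On success, body.info.canRename is true and body.locs contains one
 * SpanGroup per file; the client replaces each RenameTextSpan with the new
 * name, prepending prefixText and appending suffixText when present.
 */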
/**
* Represents a file in an external project.
* An external project is a project whose set of files, compilation options and open/close state
* is maintained by the client (i.e. all of this data comes from a .csproj file in Visual Studio).
* An external project will exist even if all files in it are closed and should be closed explicitly.
* If an external project includes one or more tsconfig.json/jsconfig.json files then tsserver will
* create a configured project for every config file but will maintain a link that these projects were created
* as a result of opening the external project, so they should be removed once the external project is closed.
*/
interface ExternalFile {
/**
* Name of the file
*/
fileName: string;
/**
* Script kind of the file
*/
scriptKind?: ScriptKindName | ts.ScriptKind;
/**
* Whether the file has mixed content (i.e. a .cshtml file that combines HTML markup with C#/JavaScript)
*/
hasMixedContent?: boolean;
/**
* Content of the file
*/
content?: string;
}
/**
* Represent an external project
*/
interface ExternalProject {
/**
* Project name
*/
projectFileName: string;
/**
* List of root files in project
*/
rootFiles: ExternalFile[];
/**
* Compiler options for the project
*/
options: ExternalProjectCompilerOptions;
/**
* @deprecated typingOptions. Use typeAcquisition instead
*/
typingOptions?: TypeAcquisition;
/**
* Explicitly specified type acquisition for the project
*/
typeAcquisition?: TypeAcquisition;
}
interface CompileOnSaveMixin {
/**
* True if compile-on-save is enabled for the project
*/
compileOnSave?: boolean;
}
/**
* For external projects, some of the project settings are sent together with
* compiler settings.
*/
type ExternalProjectCompilerOptions = CompilerOptions & CompileOnSaveMixin & WatchOptions;
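/*
 * Illustrative example (not part of the protocol definition): a minimal
 * ExternalProject value as a client might construct it. The project and file
 * names are hypothetical; "module" and "compileOnSave" come from the
 * ExternalProjectCompilerOptions type above.
 *
 *   { "projectFileName": "/home/user/MyApp.csproj",
 *     "rootFiles": [ { "fileName": "/home/user/src/app.ts" } ],
 *     "options": { "module": "CommonJS", "compileOnSave": true } }
 */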
interface FileWithProjectReferenceRedirectInfo {
/**
* Name of file
*/
fileName: string;
/**
* True if the file is primarily included in a referenced project
*/
isSourceOfProjectReferenceRedirect: boolean;
}
/**
* Represents a set of changes that happen in project
*/
interface ProjectChanges {
/**
* List of added files
*/
added: string[] | FileWithProjectReferenceRedirectInfo[];
/**
* List of removed files
*/
removed: string[] | FileWithProjectReferenceRedirectInfo[];
/**
* List of updated files
*/
updated: string[] | FileWithProjectReferenceRedirectInfo[];
/**
* List of files that have had their project reference redirect status updated
* Only provided when the synchronizeProjectList request has includeProjectReferenceRedirectInfo set to true
*/
updatedRedirects?: FileWithProjectReferenceRedirectInfo[];
}
/**
* Information found in a configure request.
*/
interface ConfigureRequestArguments {
/**
* Information about the host, for example 'Emacs 24.4' or
* 'Sublime Text version 3075'
*/
hostInfo?: string;
/**
* If present, tab settings apply only to this file.
*/
file?: string;
/**
* The format options to use during formatting and other code editing features.
*/
formatOptions?: FormatCodeSettings;
preferences?: UserPreferences;
/**
* The host's additional supported .js file extensions
*/
extraFileExtensions?: FileExtensionInfo[];
watchOptions?: WatchOptions;
}
const enum WatchFileKind {
FixedPollingInterval = "FixedPollingInterval",
PriorityPollingInterval = "PriorityPollingInterval",
DynamicPriorityPolling = "DynamicPriorityPolling",
FixedChunkSizePolling = "FixedChunkSizePolling",
UseFsEvents = "UseFsEvents",
UseFsEventsOnParentDirectory = "UseFsEventsOnParentDirectory"
}
const enum WatchDirectoryKind {
UseFsEvents = "UseFsEvents",
FixedPollingInterval = "FixedPollingInterval",
DynamicPriorityPolling = "DynamicPriorityPolling",
FixedChunkSizePolling = "FixedChunkSizePolling"
}
const enum PollingWatchKind {
FixedInterval = "FixedInterval",
PriorityInterval = "PriorityInterval",
DynamicPriority = "DynamicPriority",
FixedChunkSize = "FixedChunkSize"
}
interface WatchOptions {
watchFile?: WatchFileKind | ts.WatchFileKind;
watchDirectory?: WatchDirectoryKind | ts.WatchDirectoryKind;
fallbackPolling?: PollingWatchKind | ts.PollingWatchKind;
synchronousWatchDirectory?: boolean;
excludeDirectories?: string[];
excludeFiles?: string[];
[option: string]: CompilerOptionsValue | undefined;
}
/**
* Configure request; value of command field is "configure". Specifies
* host information, such as host type, tab size, and indent size.
*/
interface ConfigureRequest extends Request {
command: CommandTypes.Configure;
arguments: ConfigureRequestArguments;
}
/**
* Response to "configure" request. This is just an acknowledgement, so
* no body field is required.
*/
interface ConfigureResponse extends Response {
}
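/*
 * Illustrative example (not part of the protocol definition): a "configure"
 * request a client might send on startup. The host name and settings are
 * hypothetical.
 *
 *   { "seq": 5, "type": "request", "command": "configure",
 *     "arguments": { "hostInfo": "vscode",
 *                    "formatOptions": { "tabSize": 2, "indentSize": 2,
 *                                       "convertTabsToSpaces": true },
 *                    "preferences": { "quotePreference": "single" } } }
 */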
interface ConfigurePluginRequestArguments {
pluginName: string;
configuration: any;
}
interface ConfigurePluginRequest extends Request {
command: CommandTypes.ConfigurePlugin;
arguments: ConfigurePluginRequestArguments;
}
interface ConfigurePluginResponse extends Response {
}
interface SelectionRangeRequest extends FileRequest {
command: CommandTypes.SelectionRange;
arguments: SelectionRangeRequestArgs;
}
interface SelectionRangeRequestArgs extends FileRequestArgs {
locations: Location[];
}
interface SelectionRangeResponse extends Response {
body?: SelectionRange[];
}
interface SelectionRange {
textSpan: TextSpan;
parent?: SelectionRange;
}
interface ToggleLineCommentRequest extends FileRequest {
command: CommandTypes.ToggleLineComment;
arguments: FileRangeRequestArgs;
}
interface ToggleMultilineCommentRequest extends FileRequest {
command: CommandTypes.ToggleMultilineComment;
arguments: FileRangeRequestArgs;
}
interface CommentSelectionRequest extends FileRequest {
command: CommandTypes.CommentSelection;
arguments: FileRangeRequestArgs;
}
interface UncommentSelectionRequest extends FileRequest {
command: CommandTypes.UncommentSelection;
arguments: FileRangeRequestArgs;
}
/**
* Information found in an "open" request.
*/
interface OpenRequestArgs extends FileRequestArgs {
/**
* Used when a version of the file content is known to be more up to date than the one on disk.
* Then the known content will be used upon opening instead of the disk copy.
*/
fileContent?: string;
/**
* Used to specify the script kind of the file explicitly. It could be one of the following:
* "TS", "JS", "TSX", "JSX"
*/
scriptKindName?: ScriptKindName;
/**
* Used to limit the search for the project config file. If given, the search will stop at this
* root path; otherwise it will go all the way up to the disk root path.
*/
projectRootPath?: string;
}
type ScriptKindName = "TS" | "JS" | "TSX" | "JSX";
/**
* Open request; value of command field is "open". Notify the
* server that the client has a file open. The server will not
* monitor the filesystem for changes in this file and will assume
* that the client is updating the server (using the change and/or
* reload messages) when the file changes. Server does not currently
* send a response to an open request.
*/
interface OpenRequest extends Request {
command: CommandTypes.Open;
arguments: OpenRequestArgs;
}
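/*
 * Illustrative example (not part of the protocol definition): an "open"
 * request with in-memory content that is newer than the disk copy. The path
 * and content are hypothetical; the server sends no response.
 *
 *   { "seq": 6, "type": "request", "command": "open",
 *     "arguments": { "file": "/home/user/app.ts",
 *                    "fileContent": "const x = 1;\n",
 *                    "scriptKindName": "TS",
 *                    "projectRootPath": "/home/user" } }
 */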
/**
* Request to open or update external project
*/
interface OpenExternalProjectRequest extends Request {
command: CommandTypes.OpenExternalProject;
arguments: OpenExternalProjectArgs;
}
/**
* Arguments to OpenExternalProjectRequest request
*/
type OpenExternalProjectArgs = ExternalProject;
/**
* Request to open multiple external projects
*/
interface OpenExternalProjectsRequest extends Request {
command: CommandTypes.OpenExternalProjects;
arguments: OpenExternalProjectsArgs;
}
/**
* Arguments to OpenExternalProjectsRequest
*/
interface OpenExternalProjectsArgs {
/**
* List of external projects to open or update
*/
projects: ExternalProject[];
}
/**
* Response to OpenExternalProjectRequest request. This is just an acknowledgement, so
* no body field is required.
*/
interface OpenExternalProjectResponse extends Response {
}
/**
* Response to OpenExternalProjectsRequest request. This is just an acknowledgement, so
* no body field is required.
*/
interface OpenExternalProjectsResponse extends Response {
}
/**
* Request to close external project.
*/
interface CloseExternalProjectRequest extends Request {
command: CommandTypes.CloseExternalProject;
arguments: CloseExternalProjectRequestArgs;
}
/**
* Arguments to CloseExternalProjectRequest request
*/
interface CloseExternalProjectRequestArgs {
/**
* Name of the project to close
*/
projectFileName: string;
}
/**
* Response to CloseExternalProjectRequest request. This is just an acknowledgement, so
* no body field is required.
*/
interface CloseExternalProjectResponse extends Response {
}
/**
* Request to synchronize list of open files with the client
*/
interface UpdateOpenRequest extends Request {
command: CommandTypes.UpdateOpen;
arguments: UpdateOpenRequestArgs;
}
/**
* Arguments to UpdateOpenRequest
*/
interface UpdateOpenRequestArgs {
/**
* List of newly open files
*/
openFiles?: OpenRequestArgs[];
/**
* List of open files that were changed
*/
changedFiles?: FileCodeEdits[];
/**
* List of files that were closed
*/
closedFiles?: string[];
}
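/*
 * Illustrative example (not part of the protocol definition): an updateOpen
 * request that opens one file, edits another, and closes a third in a single
 * round trip. Paths and positions are hypothetical, and the command string is
 * assumed to be the wire value of CommandTypes.UpdateOpen.
 *
 *   { "seq": 7, "type": "request", "command": "updateOpen",
 *     "arguments": {
 *       "openFiles": [ { "file": "/home/user/a.ts" } ],
 *       "changedFiles": [ { "fileName": "/home/user/b.ts",
 *                           "textChanges": [ { "start": { "line": 1, "offset": 1 },
 *                                              "end": { "line": 1, "offset": 1 },
 *                                              "newText": "import \"./a\";\n" } ] } ],
 *       "closedFiles": [ "/home/user/c.ts" ] } }
 */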
/**
* External projects have a typeAcquisition option, so type acquisition settings need to be added separately to the compiler options for inferred projects.
*/
type InferredProjectCompilerOptions = ExternalProjectCompilerOptions & TypeAcquisition;
/**
* Request to set compiler options for inferred projects.
* External projects are opened / closed explicitly.
* Configured projects are opened when the user opens a loose file that has a 'tsconfig.json' or 'jsconfig.json' anywhere in one of its containing folders.
* This configuration file will be used to obtain a list of files and configuration settings for the project.
* Inferred projects are created when the user opens a loose file that is not part of an external
* or configured project; they will contain only the open file and the transitive closure of its referenced files if 'useOneInferredProject' is false,
* or all open loose files and the transitive closure of their referenced files if 'useOneInferredProject' is true.
*/
interface SetCompilerOptionsForInferredProjectsRequest extends Request {
command: CommandTypes.CompilerOptionsForInferredProjects;
arguments: SetCompilerOptionsForInferredProjectsArgs;
}
/**
* Argument for SetCompilerOptionsForInferredProjectsRequest request.
*/
interface SetCompilerOptionsForInferredProjectsArgs {
/**
* Compiler options to be used with inferred projects.
*/
options: InferredProjectCompilerOptions;
/**
* Specifies the project root path used to scope compiler options.
* It is an error to provide this property if the server has not been started with
* `useInferredProjectPerProjectRoot` enabled.
*/
projectRootPath?: string;
}
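/*
 * Illustrative example (not part of the protocol definition): setting
 * compiler options for inferred projects. The option values are hypothetical;
 * projectRootPath is only legal when the server was started with
 * `useInferredProjectPerProjectRoot`.
 *
 *   { "seq": 8, "type": "request",
 *     "command": "compilerOptionsForInferredProjects",
 *     "arguments": { "options": { "target": "ES2020", "allowJs": true,
 *                                 "enable": true } } }
 */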
/**
* Response to SetCompilerOptionsForInferredProjectsRequest request. This is just an acknowledgement, so
* no body field is required.
*/
interface SetCompilerOptionsForInferredProjectsResponse extends Response {
}
/**
* Exit request; value of command field is "exit". Ask the server process
* to exit.
*/
interface ExitRequest extends Request {
command: CommandTypes.Exit;
}
/**
* Close request; value of command field is "close". Notify the
* server that the client has closed a previously open file. If
* file is still referenced by open files, the server will resume
* monitoring the filesystem for changes to file. Server does not
* currently send a response to a close request.
*/
interface CloseRequest extends FileRequest {
command: CommandTypes.Close;
}
/**
* Request to obtain the list of files that should be regenerated if the target file is recompiled.
* NOTE: this is a query-only operation and does not generate any output on disk.
*/
interface CompileOnSaveAffectedFileListRequest extends FileRequest {
command: CommandTypes.CompileOnSaveAffectedFileList;
}
/**
* Contains a list of files that should be regenerated in a project
*/
interface CompileOnSaveAffectedFileListSingleProject {
/**
* Project name
*/
projectFileName: string;
/**
* List of file names that should be recompiled
*/
fileNames: string[];
/**
* True if the project uses the outFile or out compiler option
*/
projectUsesOutFile: boolean;
}
/**
* Response for CompileOnSaveAffectedFileListRequest request;
*/
interface CompileOnSaveAffectedFileListResponse extends Response {
body: CompileOnSaveAffectedFileListSingleProject[];
}
/**
* Request to recompile the file. All generated outputs (.js, .d.ts or .js.map files) are written to disk.
*/
interface CompileOnSaveEmitFileRequest extends FileRequest {
command: CommandTypes.CompileOnSaveEmitFile;
arguments: CompileOnSaveEmitFileRequestArgs;
}
/**
* Arguments for CompileOnSaveEmitFileRequest
*/
interface CompileOnSaveEmitFileRequestArgs extends FileRequestArgs {
/**
* If true, the file should be recompiled even if it does not have any changes.
*/
forced?: boolean;
includeLinePosition?: boolean;
/** If true, return the response as an object with emitSkipped and diagnostics */
richResponse?: boolean;
}
interface CompileOnSaveEmitFileResponse extends Response {
body: boolean | EmitResult;
}
interface EmitResult {
emitSkipped: boolean;
diagnostics: Diagnostic[] | DiagnosticWithLinePosition[];
}
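/*
 * Illustrative sketch (not part of the protocol definition): the
 * compile-on-save flow a client might follow after a file is saved. The path
 * is hypothetical and the command strings are assumed to be the wire values
 * of the corresponding CommandTypes members.
 *
 *   1. Send "compileOnSaveAffectedFileList" for the saved file.
 *   2. For each project in the response body, send "compileOnSaveEmitFile"
 *      for every entry in fileNames, e.g.:
 *
 *      { "seq": 9, "type": "request", "command": "compileOnSaveEmitFile",
 *        "arguments": { "file": "/home/user/app.ts", "richResponse": true } }
 *
 *   With richResponse set, the body is an EmitResult instead of a boolean.
 */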
/**
* Quickinfo request; value of command field is
* "quickinfo". Return response giving a quick type and
* documentation string for the symbol found in file at location
* line, col.
*/
interface QuickInfoRequest extends FileLocationRequest {
command: CommandTypes.Quickinfo;
arguments: FileLocationRequestArgs;
}
/**
* Body of QuickInfoResponse.
*/
interface QuickInfoResponseBody {
/**
* The symbol's kind (such as 'className' or 'parameterName' or plain 'text').
*/
kind: ScriptElementKind;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers: string;
/**
* Starting file location of symbol.
*/
start: Location;
/**
* One past last character of symbol.
*/
end: Location;
/**
* Type and kind of symbol.
*/
displayString: string;
/**
* Documentation associated with symbol.
* Display parts when UserPreferences.displayPartsForJSDoc is true, flattened to string otherwise.
*/
documentation: string | SymbolDisplayPart[];
/**
* JSDoc tags associated with symbol.
*/
tags: JSDocTagInfo[];
}
/**
* Quickinfo response message.
*/
interface QuickInfoResponse extends Response {
body?: QuickInfoResponseBody;
}
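/*
 * Illustrative example (not part of the protocol definition): a "quickinfo"
 * request for the symbol under the cursor. Path and position are
 * hypothetical; the response body, when present, carries the kind, span,
 * displayString, documentation and tags described above.
 *
 *   { "seq": 10, "type": "request", "command": "quickinfo",
 *     "arguments": { "file": "/home/user/app.ts", "line": 4, "offset": 10 } }
 */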
/**
* Arguments for format messages.
*/
interface FormatRequestArgs extends FileLocationRequestArgs {
/**
* Last line of range for which to format text in file.
*/
endLine: number;
/**
* Character offset on last line of range for which to format text in file.
*/
endOffset: number;
/**
* Format options to be used.
*/
options?: FormatCodeSettings;
}
/**
* Format request; value of command field is "format". Return
* response giving zero or more edit instructions. The edit
* instructions will be sorted in file order. Applying the edit
* instructions in reverse to file will result in correctly
* reformatted text.
*/
interface FormatRequest extends FileLocationRequest {
command: CommandTypes.Format;
arguments: FormatRequestArgs;
}
/**
* Object found in response messages defining an editing
* instruction for a span of text in source code. The effect of
* this instruction is to replace the text starting at start and
* ending one character before end with newText. For an insertion,
* the text span is empty. For a deletion, newText is empty.
*/
interface CodeEdit {
/**
* First character of the text span to edit.
*/
start: Location;
/**
* One character past last character of the text span to edit.
*/
end: Location;
/**
* Replace the span defined above with this string (may be
* the empty string).
*/
newText: string;
}
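/*
 * Sketch (not part of the protocol definition): because edits in a format
 * response are sorted in file order, applying them from last to first keeps
 * the earlier Locations valid. `positionOf` is a hypothetical helper that
 * maps a 1-based Location to a string index.
 *
 *   function applyEdits(text: string, edits: CodeEdit[]): string {
 *       for (let i = edits.length - 1; i >= 0; i--) {
 *           const { start, end, newText } = edits[i];
 *           text = text.slice(0, positionOf(text, start)) + newText +
 *                  text.slice(positionOf(text, end));
 *       }
 *       return text;
 *   }
 */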
interface FileCodeEdits {
fileName: string;
textChanges: CodeEdit[];
}
interface CodeFixResponse extends Response {
/** The code actions that are available */
body?: CodeFixAction[];
}
interface CodeAction {
/** Description of the code action to display in the UI of the editor */
description: string;
/** Text changes to apply to each file as part of the code action */
changes: FileCodeEdits[];
/** A command is an opaque object that should be passed to `ApplyCodeActionCommandRequestArgs` without modification. */
commands?: {}[];
}
interface CombinedCodeActions {
changes: readonly FileCodeEdits[];
commands?: readonly {}[];
}
interface CodeFixAction extends CodeAction {
/** Short name to identify the fix, for use by telemetry. */
fixName: string;
/**
* If present, one may call 'getCombinedCodeFix' with this fixId.
* This may be omitted to indicate that the code fix can't be applied in a group.
*/
fixId?: {};
/** Should be present if and only if 'fixId' is. */
fixAllDescription?: string;
}
/**
* Format and format on key response message.
*/
interface FormatResponse extends Response {
body?: CodeEdit[];
}
/**
* Arguments for format on key messages.
*/
interface FormatOnKeyRequestArgs extends FileLocationRequestArgs {
/**
* Key pressed (';', '\n', or '}').
*/
key: string;
options?: FormatCodeSettings;
}
/**
* Format on key request; value of command field is
* "formatonkey". Given file location and key typed (as string),
* return response giving zero or more edit instructions. The
* edit instructions will be sorted in file order. Applying the
* edit instructions in reverse to file will result in correctly
* reformatted text.
*/
interface FormatOnKeyRequest extends FileLocationRequest {
command: CommandTypes.Formatonkey;
arguments: FormatOnKeyRequestArgs;
}
type CompletionsTriggerCharacter = "." | '"' | "'" | "`" | "/" | "@" | "<" | "#" | " ";
const enum CompletionTriggerKind {
/** Completion was triggered by typing an identifier, manual invocation (e.g Ctrl+Space) or via API. */
Invoked = 1,
/** Completion was triggered by a trigger character. */
TriggerCharacter = 2,
/** Completion was re-triggered as the current completion list is incomplete. */
TriggerForIncompleteCompletions = 3
}
/**
* Arguments for completions messages.
*/
interface CompletionsRequestArgs extends FileLocationRequestArgs {
/**
* Optional prefix to apply to possible completions.
*/
prefix?: string;
/**
* Character that was responsible for triggering completion.
* Should be `undefined` if a user manually requested completion.
*/
triggerCharacter?: CompletionsTriggerCharacter;
triggerKind?: CompletionTriggerKind;
/**
* @deprecated Use UserPreferences.includeCompletionsForModuleExports
*/
includeExternalModuleExports?: boolean;
/**
* @deprecated Use UserPreferences.includeCompletionsWithInsertText
*/
includeInsertTextCompletions?: boolean;
}
/**
* Completions request; value of command field is "completions".
* Given a file location (file, line, col) and a prefix (which may
* be the empty string), return the possible completions that
* begin with prefix.
*/
interface CompletionsRequest extends FileLocationRequest {
command: CommandTypes.Completions | CommandTypes.CompletionInfo;
arguments: CompletionsRequestArgs;
}
/**
* Arguments for completion details request.
*/
interface CompletionDetailsRequestArgs extends FileLocationRequestArgs {
/**
* Names of one or more entries for which to obtain details.
*/
entryNames: (string | CompletionEntryIdentifier)[];
}
interface CompletionEntryIdentifier {
name: string;
source?: string;
data?: unknown;
}
/**
* Completion entry details request; value of command field is
* "completionEntryDetails". Given a file location (file, line,
* col) and an array of completion entry names return more
* detailed information for each completion entry.
*/
interface CompletionDetailsRequest extends FileLocationRequest {
command: CommandTypes.CompletionDetails;
arguments: CompletionDetailsRequestArgs;
}
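/*
 * Illustrative example (not part of the protocol definition): the two-step
 * completion flow. A client first asks for the list, then requests details
 * for the entry the user highlights, echoing back `name`, `source` and
 * `data` unchanged. Paths, positions and entry names are hypothetical.
 *
 *   { "seq": 11, "type": "request", "command": "completionInfo",
 *     "arguments": { "file": "/home/user/app.ts", "line": 5, "offset": 8,
 *                    "triggerCharacter": ".", "triggerKind": 2 } }
 *
 *   { "seq": 12, "type": "request", "command": "completionEntryDetails",
 *     "arguments": { "file": "/home/user/app.ts", "line": 5, "offset": 8,
 *                    "entryNames": [ { "name": "writeFile", "source": "fs" } ] } }
 */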
/**
* Part of a symbol description.
*/
interface SymbolDisplayPart {
/**
* Text of an item describing the symbol.
*/
text: string;
/**
* The symbol's kind (such as 'className' or 'parameterName' or plain 'text').
*/
kind: string;
}
/** A part of a symbol description that links from a jsdoc @link tag to a declaration */
interface JSDocLinkDisplayPart extends SymbolDisplayPart {
/** The location of the declaration that the @link tag links to. */
target: FileSpan;
}
/**
* An item found in a completion response.
*/
interface CompletionEntry {
/**
* The symbol's name.
*/
name: string;
/**
* The symbol's kind (such as 'className' or 'parameterName').
*/
kind: ScriptElementKind;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers?: string;
/**
* A string that is used for comparing completion items so that they can be ordered. This
* is often the same as the name but may be different in certain circumstances.
*/
sortText: string;
/**
* Text to insert instead of `name`.
* This is used to support bracketed completions; e.g. `name` might be "a-b" while `insertText` is `["a-b"]`,
* coupled with `replacementSpan` to replace a dotted access with a bracket access.
*/
insertText?: string;
/**
* `insertText` should be interpreted as a snippet if true.
*/
isSnippet?: true;
/**
* An optional span that indicates the text to be replaced by this completion item.
* If present, this span should be used instead of the default one.
* It will be set if the required span differs from the one generated by the default replacement behavior.
*/
replacementSpan?: TextSpan;
/**
* Indicates whether committing this completion entry will require additional code actions to be
* made to avoid errors. The CompletionEntryDetails will have these actions.
*/
hasAction?: true;
/**
* Identifier (not necessarily human-readable) identifying where this completion came from.
*/
source?: string;
/**
* Human-readable description of the `source`.
*/
sourceDisplay?: SymbolDisplayPart[];
/**
* Additional details for the label.
*/
labelDetails?: CompletionEntryLabelDetails;
/**
* If true, this completion should be highlighted as recommended. There will only be one of these.
* This will be set when we know the user should write an expression with a certain type and that type is an enum or constructable class.
* Then either that enum/class or a namespace containing it will be the recommended symbol.
*/
isRecommended?: true;
/**
* If true, this completion was generated from traversing the name table of an unchecked JS file,
* and therefore may not be accurate.
*/
isFromUncheckedFile?: true;
/**
* If true, this completion was for an auto-import of a module not yet in the program, but listed
* in the project package.json. Used for telemetry reporting.
*/
isPackageJsonImport?: true;
/**
* If true, this completion was an auto-import-style completion of an import statement (i.e., the
* module specifier was inserted along with the imported identifier). Used for telemetry reporting.
*/
isImportStatementCompletion?: true;
/**
* A property to be sent back to TS Server in the CompletionDetailsRequest, along with `name`,
* that allows TS Server to look up the symbol represented by the completion item, disambiguating
* items with the same name.
*/
data?: unknown;
}
interface CompletionEntryLabelDetails {
/**
* An optional string which is rendered less prominently directly after
* {@link CompletionEntry.name name}, without any spacing. Should be
* used for function signatures or type annotations.
*/
detail?: string;
/**
* An optional string which is rendered less prominently after
* {@link CompletionEntryLabelDetails.detail}. Should be used for fully qualified
* names or file paths.
*/
description?: string;
}
/**
* Additional completion entry details, available on demand
*/
interface CompletionEntryDetails {
/**
* The symbol's name.
*/
name: string;
/**
* The symbol's kind (such as 'className' or 'parameterName').
*/
kind: ScriptElementKind;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers: string;
/**
* Display parts of the symbol (similar to quick info).
*/
displayParts: SymbolDisplayPart[];
/**
* Documentation strings for the symbol.
*/
documentation?: SymbolDisplayPart[];
/**
* JSDoc tags for the symbol.
*/
tags?: JSDocTagInfo[];
/**
* The associated code actions for this entry
*/
codeActions?: CodeAction[];
/**
* @deprecated Use `sourceDisplay` instead.
*/
source?: SymbolDisplayPart[];
/**
* Human-readable description of the `source` from the CompletionEntry.
*/
sourceDisplay?: SymbolDisplayPart[];
}
/** @deprecated Prefer CompletionInfoResponse, which supports several top-level fields in addition to the array of entries. */
interface CompletionsResponse extends Response {
body?: CompletionEntry[];
}
interface CompletionInfoResponse extends Response {
body?: CompletionInfo;
}
interface CompletionInfo {
readonly flags?: number;
readonly isGlobalCompletion: boolean;
readonly isMemberCompletion: boolean;
readonly isNewIdentifierLocation: boolean;
/**
* In the absence of `CompletionEntry["replacementSpan"]`, the editor may choose whether to use
* this span or its default one. If `CompletionEntry["replacementSpan"]` is defined, that span
* must be used to commit that completion entry.
*/
readonly optionalReplacementSpan?: TextSpan;
readonly isIncomplete?: boolean;
readonly entries: readonly CompletionEntry[];
}
interface CompletionDetailsResponse extends Response {
body?: CompletionEntryDetails[];
}
/**
* Signature help information for a single parameter
*/
interface SignatureHelpParameter {
/**
* The parameter's name
*/
name: string;
/**
* Documentation of the parameter.
*/
documentation: SymbolDisplayPart[];
/**
* Display parts of the parameter.
*/
displayParts: SymbolDisplayPart[];
/**
* Whether the parameter is optional or not.
*/
isOptional: boolean;
}
/**
* Represents a single signature to show in signature help.
*/
interface SignatureHelpItem {
/**
* Whether the signature accepts a variable number of arguments.
*/
isVariadic: boolean;
/**
* The prefix display parts.
*/
prefixDisplayParts: SymbolDisplayPart[];
/**
* The suffix display parts.
*/
suffixDisplayParts: SymbolDisplayPart[];
/**
* The separator display parts.
*/
separatorDisplayParts: SymbolDisplayPart[];
/**
* The signature helps items for the parameters.
*/
parameters: SignatureHelpParameter[];
/**
* The signature's documentation
*/
documentation: SymbolDisplayPart[];
/**
* The signature's JSDoc tags
*/
tags: JSDocTagInfo[];
}
/**
* Signature help items found in the response of a signature help request.
*/
interface SignatureHelpItems {
/**
* The signature help items.
*/
items: SignatureHelpItem[];
/**
* The span for which signature help should appear on a signature
*/
applicableSpan: TextSpan;
/**
* The item selected in the set of available help items.
*/
selectedItemIndex: number;
/**
* The argument selected in the set of parameters.
*/
argumentIndex: number;
/**
* The argument count
*/
argumentCount: number;
}
type SignatureHelpTriggerCharacter = "," | "(" | "<";
type SignatureHelpRetriggerCharacter = SignatureHelpTriggerCharacter | ")";
/**
* Arguments of a signature help request.
*/
interface SignatureHelpRequestArgs extends FileLocationRequestArgs {
/**
* Reason why signature help was invoked.
* See each individual possible trigger reason for details.
*/
triggerReason?: SignatureHelpTriggerReason;
}
type SignatureHelpTriggerReason = SignatureHelpInvokedReason | SignatureHelpCharacterTypedReason | SignatureHelpRetriggeredReason;
/**
* Signals that the user manually requested signature help.
* The language service will unconditionally attempt to provide a result.
*/
interface SignatureHelpInvokedReason {
kind: "invoked";
triggerCharacter?: undefined;
}
/**
* Signals that the signature help request came from a user typing a character.
* Depending on the character and the syntactic context, the request may or may not be served a result.
*/
interface SignatureHelpCharacterTypedReason {
kind: "characterTyped";
/**
* Character that was responsible for triggering signature help.
*/
triggerCharacter: SignatureHelpTriggerCharacter;
}
/**
* Signals that this signature help request came from typing a character or moving the cursor.
* This should only occur if a signature help session was already active and the editor needs to see if it should adjust.
* The language service will unconditionally attempt to provide a result.
* `triggerCharacter` can be `undefined` for a retrigger caused by a cursor move.
*/
interface SignatureHelpRetriggeredReason {
kind: "retrigger";
/**
* Character that was responsible for triggering signature help.
*/
triggerCharacter?: SignatureHelpRetriggerCharacter;
}
/**
* Signature help request; value of command field is "signatureHelp".
* Given a file location (file, line, col), return the signature
* help.
*/
interface SignatureHelpRequest extends FileLocationRequest {
command: CommandTypes.SignatureHelp;
arguments: SignatureHelpRequestArgs;
}
/**
* Response object for a SignatureHelpRequest.
*/
interface SignatureHelpResponse extends Response {
body?: SignatureHelpItems;
}
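/*
 * Illustrative example (not part of the protocol definition): a
 * "signatureHelp" request fired because the user typed "(". Path and
 * position are hypothetical.
 *
 *   { "seq": 13, "type": "request", "command": "signatureHelp",
 *     "arguments": { "file": "/home/user/app.ts", "line": 6, "offset": 10,
 *                    "triggerReason": { "kind": "characterTyped",
 *                                       "triggerCharacter": "(" } } }
 */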
type InlayHintKind = "Type" | "Parameter" | "Enum";
interface InlayHintsRequestArgs extends FileRequestArgs {
/**
* Start position of the span.
*/
start: number;
/**
* Length of the span.
*/
length: number;
}
interface InlayHintsRequest extends Request {
command: CommandTypes.ProvideInlayHints;
arguments: InlayHintsRequestArgs;
}
interface InlayHintItem {
text: string;
position: Location;
kind: InlayHintKind;
whitespaceBefore?: boolean;
whitespaceAfter?: boolean;
}
interface InlayHintsResponse extends Response {
body?: InlayHintItem[];
}
/**
* Synchronous request for semantic diagnostics of one file.
*/
interface SemanticDiagnosticsSyncRequest extends FileRequest {
command: CommandTypes.SemanticDiagnosticsSync;
arguments: SemanticDiagnosticsSyncRequestArgs;
}
interface SemanticDiagnosticsSyncRequestArgs extends FileRequestArgs {
includeLinePosition?: boolean;
}
/**
* Response object for synchronous semantic diagnostics request.
*/
interface SemanticDiagnosticsSyncResponse extends Response {
body?: Diagnostic[] | DiagnosticWithLinePosition[];
}
interface SuggestionDiagnosticsSyncRequest extends FileRequest {
command: CommandTypes.SuggestionDiagnosticsSync;
arguments: SuggestionDiagnosticsSyncRequestArgs;
}
type SuggestionDiagnosticsSyncRequestArgs = SemanticDiagnosticsSyncRequestArgs;
type SuggestionDiagnosticsSyncResponse = SemanticDiagnosticsSyncResponse;
/**
* Synchronous request for syntactic diagnostics of one file.
*/
interface SyntacticDiagnosticsSyncRequest extends FileRequest {
command: CommandTypes.SyntacticDiagnosticsSync;
arguments: SyntacticDiagnosticsSyncRequestArgs;
}
interface SyntacticDiagnosticsSyncRequestArgs extends FileRequestArgs {
includeLinePosition?: boolean;
}
/**
* Response object for synchronous syntactic diagnostics request.
*/
interface SyntacticDiagnosticsSyncResponse extends Response {
body?: Diagnostic[] | DiagnosticWithLinePosition[];
}
/**
* Arguments for GeterrForProject request.
*/
interface GeterrForProjectRequestArgs {
/**
* The file requesting the project error list
*/
file: string;
/**
* Delay in milliseconds to wait before starting to compute
* errors for the files in the file list
*/
delay: number;
}
/**
* GeterrForProjectRequest request; value of command field is
* "geterrForProject". It works similarly with 'Geterr', only
* it request for every file in this project.
*/
interface GeterrForProjectRequest extends Request {
command: CommandTypes.GeterrForProject;
arguments: GeterrForProjectRequestArgs;
}
/**
* Arguments for geterr messages.
*/
interface GeterrRequestArgs {
/**
* List of file names for which to compute compiler errors.
* The files will be checked in list order.
*/
files: string[];
/**
* Delay in milliseconds to wait before starting to compute
* errors for the files in the file list
*/
delay: number;
}
/**
* Geterr request; value of command field is "geterr". Wait for
* delay milliseconds and then, if during the wait no change or
* reload messages have arrived for the first file in the files
* list, get the syntactic errors for the file, field requests,
* and then get the semantic errors for the file. Repeat with a
* smaller delay for each subsequent file on the files list. Best
* practice for an editor is to send a file list containing each
* file that is currently visible, in most-recently-used order.
*/
interface GeterrRequest extends Request {
command: CommandTypes.Geterr;
arguments: GeterrRequestArgs;
}
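/*
 * Illustrative example (not part of the protocol definition): a "geterr"
 * request listing the visible files in most-recently-used order. Diagnostics
 * arrive later as "syntaxDiag", "semanticDiag" and "suggestionDiag" events
 * rather than as a direct response. Paths and the delay are hypothetical.
 *
 *   { "seq": 14, "type": "request", "command": "geterr",
 *     "arguments": { "files": ["/home/user/app.ts", "/home/user/util.ts"],
 *                    "delay": 100 } }
 */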
type RequestCompletedEventName = "requestCompleted";
/**
* Event that is sent when the server has finished processing a request with the specified id.
*/
interface RequestCompletedEvent extends Event {
event: RequestCompletedEventName;
body: RequestCompletedEventBody;
}
interface RequestCompletedEventBody {
request_seq: number;
}
/**
* Item of diagnostic information found in a DiagnosticEvent message.
*/
interface Diagnostic {
/**
* Starting file location at which text applies.
*/
start: Location;
/**
* The last file location at which the text applies.
*/
end: Location;
/**
* Text of diagnostic message.
*/
text: string;
/**
* The category of the diagnostic message, e.g. "error", "warning", or "suggestion".
*/
category: string;
reportsUnnecessary?: {};
reportsDeprecated?: {};
/**
* Any related spans the diagnostic may have, such as other locations relevant to an error (e.g. declaration sites)
*/
relatedInformation?: DiagnosticRelatedInformation[];
/**
* The error code of the diagnostic message.
*/
code?: number;
/**
* The name of the plugin reporting the message.
*/
source?: string;
}
interface DiagnosticWithFileName extends Diagnostic {
/**
* Name of the file the diagnostic is in
*/
fileName: string;
}
/**
* Represents additional spans returned with a diagnostic which are relevant to it
*/
interface DiagnosticRelatedInformation {
/**
* The category of the related information message, e.g. "error", "warning", or "suggestion".
*/
category: string;
/**
* The code used to identify the related information
*/
code: number;
/**
* Text of related or additional information.
*/
message: string;
/**
* Associated location
*/
span?: FileSpan;
}
interface DiagnosticEventBody {
/**
* The file for which diagnostic information is reported.
*/
file: string;
/**
* An array of diagnostic information items.
*/
diagnostics: Diagnostic[];
}
type DiagnosticEventKind = "semanticDiag" | "syntaxDiag" | "suggestionDiag";
/**
* Event message for DiagnosticEventKind event types.
* These events provide syntactic and semantic errors for a file.
*/
interface DiagnosticEvent extends Event {
body?: DiagnosticEventBody;
event: DiagnosticEventKind;
}
interface ConfigFileDiagnosticEventBody {
/**
* The file which triggered the searching and error-checking of the config file
*/
triggerFile: string;
/**
* The name of the found config file.
*/
configFile: string;
/**
* An array of diagnostic information items for the found config file.
*/
diagnostics: DiagnosticWithFileName[];
}
/**
* Event message for "configFileDiag" event type.
* This event provides errors for a found config file.
*/
interface ConfigFileDiagnosticEvent extends Event {
body?: ConfigFileDiagnosticEventBody;
event: "configFileDiag";
}
type ProjectLanguageServiceStateEventName = "projectLanguageServiceState";
interface ProjectLanguageServiceStateEvent extends Event {
event: ProjectLanguageServiceStateEventName;
body?: ProjectLanguageServiceStateEventBody;
}
interface ProjectLanguageServiceStateEventBody {
/**
* Project name that has changes in the state of language service.
* For configured projects this will be the config file path.
* For external projects this will be the name of the project specified when the project was opened.
* For inferred projects this event is not raised.
*/
projectName: string;
/**
* True if language service state switched from disabled to enabled
* and false otherwise.
*/
languageServiceEnabled: boolean;
}
type ProjectsUpdatedInBackgroundEventName = "projectsUpdatedInBackground";
interface ProjectsUpdatedInBackgroundEvent extends Event {
event: ProjectsUpdatedInBackgroundEventName;
body: ProjectsUpdatedInBackgroundEventBody;
}
interface ProjectsUpdatedInBackgroundEventBody {
/**
* Current set of open files
*/
openFiles: string[];
}
type ProjectLoadingStartEventName = "projectLoadingStart";
interface ProjectLoadingStartEvent extends Event {
event: ProjectLoadingStartEventName;
body: ProjectLoadingStartEventBody;
}
interface ProjectLoadingStartEventBody {
/** name of the project */
projectName: string;
/** reason for loading */
reason: string;
}
type ProjectLoadingFinishEventName = "projectLoadingFinish";
interface ProjectLoadingFinishEvent extends Event {
event: ProjectLoadingFinishEventName;
body: ProjectLoadingFinishEventBody;
}
interface ProjectLoadingFinishEventBody {
/** name of the project */
projectName: string;
}
type SurveyReadyEventName = "surveyReady";
interface SurveyReadyEvent extends Event {
event: SurveyReadyEventName;
body: SurveyReadyEventBody;
}
interface SurveyReadyEventBody {
/** Name of the survey. This is an internal machine- and programmer-friendly name */
surveyId: string;
}
type LargeFileReferencedEventName = "largeFileReferenced";
interface LargeFileReferencedEvent extends Event {
event: LargeFileReferencedEventName;
body: LargeFileReferencedEventBody;
}
interface LargeFileReferencedEventBody {
/**
* name of the large file being loaded
*/
file: string;
/**
* size of the file
*/
fileSize: number;
/**
* max file size allowed on the server
*/
maxFileSize: number;
}
/**
* Arguments for reload request.
*/
interface ReloadRequestArgs extends FileRequestArgs {
/**
* Name of temporary file from which to reload file
* contents. May be same as file.
*/
tmpfile: string;
}
/**
* Reload request message; value of command field is "reload".
* Reload contents of file with name given by the 'file' argument
* from temporary file with name given by the 'tmpfile' argument.
* The two names can be identical.
*/
interface ReloadRequest extends FileRequest {
command: CommandTypes.Reload;
arguments: ReloadRequestArgs;
}
/**
* Response to "reload" request. This is just an acknowledgement, so
* no body field is required.
*/
interface ReloadResponse extends Response {
}
/**
* Arguments for saveto request.
*/
interface SavetoRequestArgs extends FileRequestArgs {
/**
* Name of temporary file into which to save server's view of
* file contents.
*/
tmpfile: string;
}
/**
* Saveto request message; value of command field is "saveto".
* For debugging purposes, save to a temporary file (named by
* argument 'tmpfile') the contents of file named by argument
* 'file'. The server does not currently send a response to a
* "saveto" request.
*/
interface SavetoRequest extends FileRequest {
command: CommandTypes.Saveto;
arguments: SavetoRequestArgs;
}
/**
* Arguments for navto request message.
*/
interface NavtoRequestArgs {
/**
* Search term to navigate to from current location; term can
* be '.*' or an identifier prefix.
*/
searchValue: string;
/**
* Optional limit on the number of items to return.
*/
maxResultCount?: number;
/**
* The file for the request (absolute pathname required).
*/
file?: string;
/**
* Optional flag to indicate we want results for just the current file
* or the entire project.
*/
currentFileOnly?: boolean;
projectFileName?: string;
}
/**
* Navto request message; value of command field is "navto".
* Return list of objects giving file locations and symbols that
* match the search term given in argument 'searchValue'. The
* context for the search is given by the named file.
*/
interface NavtoRequest extends Request {
command: CommandTypes.Navto;
arguments: NavtoRequestArgs;
}
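/*
 * Illustrative example (not part of the protocol definition): a "navto"
 * request searching the whole project for symbols starting with "get". Path
 * and result limit are hypothetical.
 *
 *   { "seq": 15, "type": "request", "command": "navto",
 *     "arguments": { "searchValue": "get", "file": "/home/user/app.ts",
 *                    "maxResultCount": 50, "currentFileOnly": false } }
 */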
/**
* An item found in a navto response.
*/
interface NavtoItem extends FileSpan {
/**
* The symbol's name.
*/
name: string;
/**
* The symbol's kind (such as 'className' or 'parameterName').
*/
kind: ScriptElementKind;
/**
* exact, substring, or prefix.
*/
matchKind: string;
/**
* If this was a case sensitive or insensitive match.
*/
isCaseSensitive: boolean;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers?: string;
/**
* Name of symbol's container symbol (if any); for example,
* the class name if symbol is a class member.
*/
containerName?: string;
/**
* Kind of symbol's container symbol (if any).
*/
containerKind?: ScriptElementKind;
}
/**
* Navto response message. Body is an array of navto items. Each
* item gives a symbol that matched the search term.
*/
interface NavtoResponse extends Response {
body?: NavtoItem[];
}
/**
* Arguments for change request message.
*/
interface ChangeRequestArgs extends FormatRequestArgs {
/**
* Optional string to insert at location (file, line, offset).
*/
insertString?: string;
}
/**
* Change request message; value of command field is "change".
* Update the server's view of the file named by argument 'file'.
* Server does not currently send a response to a change request.
*/
interface ChangeRequest extends FileLocationRequest {
command: CommandTypes.Change;
arguments: ChangeRequestArgs;
}
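/*
 * Illustrative example (not part of the protocol definition): a "change"
 * request that replaces one character. The range to replace runs from
 * (line, offset) to (endLine, endOffset); insertString is the replacement
 * text. Path and positions are hypothetical; the server sends no response.
 *
 *   { "seq": 16, "type": "request", "command": "change",
 *     "arguments": { "file": "/home/user/app.ts", "line": 2, "offset": 7,
 *                    "endLine": 2, "endOffset": 8, "insertString": "y" } }
 */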
/**
* Response to "brace" request.
*/
interface BraceResponse extends Response {
body?: TextSpan[];
}
/**
* Brace matching request; value of command field is "brace".
* Return response giving the file locations of matching braces
* found in file at location line, offset.
*/
interface BraceRequest extends FileLocationRequest {
command: CommandTypes.Brace;
}
/**
* NavBar items request; value of command field is "navbar".
* Return response giving the list of navigation bar entries
* extracted from the requested file.
*/
interface NavBarRequest extends FileRequest {
command: CommandTypes.NavBar;
}
/**
* NavTree request; value of command field is "navtree".
* Return response giving the navigation tree of the requested file.
*/
interface NavTreeRequest extends FileRequest {
command: CommandTypes.NavTree;
}
interface NavigationBarItem {
/**
* The item's display text.
*/
text: string;
/**
* The symbol's kind (such as 'className' or 'parameterName').
*/
kind: ScriptElementKind;
/**
* Optional modifiers for the kind (such as 'public').
*/
kindModifiers?: string;
/**
* The definition locations of the item.
*/
spans: TextSpan[];
/**
* Optional children.
*/
childItems?: NavigationBarItem[];
/**
* Number of levels deep this item should appear.
*/
indent: number;
}
/** protocol.NavigationTree is identical to ts.NavigationTree, except using protocol.TextSpan instead of ts.TextSpan */
interface NavigationTree {
text: string;
kind: ScriptElementKind;
kindModifiers: string;
spans: TextSpan[];
nameSpan: TextSpan | undefined;
childItems?: NavigationTree[];
}
type TelemetryEventName = "telemetry";
interface TelemetryEvent extends Event {
event: TelemetryEventName;
body: TelemetryEventBody;
}
interface TelemetryEventBody {
telemetryEventName: string;
payload: any;
}
type TypesInstallerInitializationFailedEventName = "typesInstallerInitializationFailed";
interface TypesInstallerInitializationFailedEvent extends Event {
event: TypesInstallerInitializationFailedEventName;
body: TypesInstallerInitializationFailedEventBody;
}
interface TypesInstallerInitializationFailedEventBody {
message: string;
}
type TypingsInstalledTelemetryEventName = "typingsInstalled";
interface TypingsInstalledTelemetryEventBody extends TelemetryEventBody {
telemetryEventName: TypingsInstalledTelemetryEventName;
payload: TypingsInstalledTelemetryEventPayload;
}
interface TypingsInstalledTelemetryEventPayload {
/**
* Comma separated list of installed typing packages
*/
installedPackages: string;
/**
* True if the install request succeeded, otherwise false
*/
installSuccess: boolean;
/**
* version of typings installer
*/
typingsInstallerVersion: string;
}
type BeginInstallTypesEventName = "beginInstallTypes";
type EndInstallTypesEventName = "endInstallTypes";
interface BeginInstallTypesEvent extends Event {
event: BeginInstallTypesEventName;
body: BeginInstallTypesEventBody;
}
interface EndInstallTypesEvent extends Event {
event: EndInstallTypesEventName;
body: EndInstallTypesEventBody;
}
interface InstallTypesEventBody {
/**
* correlation id to match begin and end events
*/
eventId: number;
/**
* list of packages to install
*/
packages: readonly string[];
}
interface BeginInstallTypesEventBody extends InstallTypesEventBody {
}
interface EndInstallTypesEventBody extends InstallTypesEventBody {
/**
* true if installation succeeded, otherwise false
*/
success: boolean;
}
interface NavBarResponse extends Response {
body?: NavigationBarItem[];
}
interface NavTreeResponse extends Response {
body?: NavigationTree;
}
interface CallHierarchyItem {
name: string;
kind: ScriptElementKind;
kindModifiers?: string;
file: string;
span: TextSpan;
selectionSpan: TextSpan;
containerName?: string;
}
interface CallHierarchyIncomingCall {
from: CallHierarchyItem;
fromSpans: TextSpan[];
}
interface CallHierarchyOutgoingCall {
to: CallHierarchyItem;
fromSpans: TextSpan[];
}
interface PrepareCallHierarchyRequest extends FileLocationRequest {
command: CommandTypes.PrepareCallHierarchy;
}
interface PrepareCallHierarchyResponse extends Response {
readonly body: CallHierarchyItem | CallHierarchyItem[];
}
interface ProvideCallHierarchyIncomingCallsRequest extends FileLocationRequest {
command: CommandTypes.ProvideCallHierarchyIncomingCalls;
}
interface ProvideCallHierarchyIncomingCallsResponse extends Response {
readonly body: CallHierarchyIncomingCall[];
}
interface ProvideCallHierarchyOutgoingCallsRequest extends FileLocationRequest {
command: CommandTypes.ProvideCallHierarchyOutgoingCalls;
}
interface ProvideCallHierarchyOutgoingCallsResponse extends Response {
readonly body: CallHierarchyOutgoingCall[];
}
const enum IndentStyle {
None = "None",
Block = "Block",
Smart = "Smart"
}
enum SemicolonPreference {
Ignore = "ignore",
Insert = "insert",
Remove = "remove"
}
interface EditorSettings {
baseIndentSize?: number;
indentSize?: number;
tabSize?: number;
newLineCharacter?: string;
convertTabsToSpaces?: boolean;
indentStyle?: IndentStyle | ts.IndentStyle;
trimTrailingWhitespace?: boolean;
}
interface FormatCodeSettings extends EditorSettings {
insertSpaceAfterCommaDelimiter?: boolean;
insertSpaceAfterSemicolonInForStatements?: boolean;
insertSpaceBeforeAndAfterBinaryOperators?: boolean;
insertSpaceAfterConstructor?: boolean;
insertSpaceAfterKeywordsInControlFlowStatements?: boolean;
insertSpaceAfterFunctionKeywordForAnonymousFunctions?: boolean;
insertSpaceAfterOpeningAndBeforeClosingEmptyBraces?: boolean;
insertSpaceAfterOpeningAndBeforeClosingNonemptyParenthesis?: boolean;
insertSpaceAfterOpeningAndBeforeClosingNonemptyBrackets?: boolean;
insertSpaceAfterOpeningAndBeforeClosingNonemptyBraces?: boolean;
insertSpaceAfterOpeningAndBeforeClosingTemplateStringBraces?: boolean;
insertSpaceAfterOpeningAndBeforeClosingJsxExpressionBraces?: boolean;
insertSpaceAfterTypeAssertion?: boolean;
insertSpaceBeforeFunctionParenthesis?: boolean;
placeOpenBraceOnNewLineForFunctions?: boolean;
placeOpenBraceOnNewLineForControlBlocks?: boolean;
insertSpaceBeforeTypeAnnotation?: boolean;
semicolons?: SemicolonPreference;
}
interface UserPreferences {
readonly disableSuggestions?: boolean;
readonly quotePreference?: "auto" | "double" | "single";
/**
* If enabled, TypeScript will search through all external modules' exports and add them to the completions list.
* This affects lone identifier completions but not completions on the right hand side of `obj.`.
*/
readonly includeCompletionsForModuleExports?: boolean;
/**
* Enables auto-import-style completions on partially-typed import statements. E.g., allows
* `import write|` to be completed to `import { writeFile } from "fs"`.
*/
readonly includeCompletionsForImportStatements?: boolean;
/**
* Allows completions to be formatted with snippet text, indicated by `CompletionItem["isSnippet"]`.
*/
readonly includeCompletionsWithSnippetText?: boolean;
/**
* If enabled, the completion list will include completions with invalid identifier names.
* For those entries, the `insertText` and `replacementSpan` properties will be set to change from `.x` property access to `["x"]`.
*/
readonly includeCompletionsWithInsertText?: boolean;
/**
* Unless this option is `false`, or `includeCompletionsWithInsertText` is not enabled,
* member completion lists triggered with `.` will include entries on potentially-null and potentially-undefined
* values, with insertion text to replace preceding `.` tokens with `?.`.
*/
readonly includeAutomaticOptionalChainCompletions?: boolean;
/**
* If enabled, completions for class members (e.g. methods and properties) will include
* a whole declaration for the member.
* E.g., `class A { f| }` could be completed to `class A { foo(): number {} }`, instead of
* `class A { foo }`.
*/
readonly includeCompletionsWithClassMemberSnippets?: boolean;
/**
* If enabled, object literal methods will have a method declaration completion entry in addition
* to the regular completion entry containing just the method name.
* E.g., `const objectLiteral: T = { f| }` could be completed to `const objectLiteral: T = { foo(): void {} }`,
* in addition to `const objectLiteral: T = { foo }`.
*/
readonly includeCompletionsWithObjectLiteralMethodSnippets?: boolean;
/**
* Indicates whether {@link CompletionEntry.labelDetails completion entry label details} are supported.
* If not, contents of `labelDetails` may be included in the {@link CompletionEntry.name} property.
*/
readonly useLabelDetailsInCompletionEntries?: boolean;
readonly allowIncompleteCompletions?: boolean;
readonly importModuleSpecifierPreference?: "shortest" | "project-relative" | "relative" | "non-relative";
/** Determines whether we import `foo/index.ts` as "foo", "foo/index", or "foo/index.js" */
readonly importModuleSpecifierEnding?: "auto" | "minimal" | "index" | "js";
readonly allowTextChangesInNewFiles?: boolean;
readonly lazyConfiguredProjectsFromExternalProject?: boolean;
readonly providePrefixAndSuffixTextForRename?: boolean;
readonly provideRefactorNotApplicableReason?: boolean;
readonly allowRenameOfImportPath?: boolean;
readonly includePackageJsonAutoImports?: "auto" | "on" | "off";
readonly jsxAttributeCompletionStyle?: "auto" | "braces" | "none";
readonly displayPartsForJSDoc?: boolean;
readonly generateReturnInDocTemplate?: boolean;
readonly includeInlayParameterNameHints?: "none" | "literals" | "all";
readonly includeInlayParameterNameHintsWhenArgumentMatchesName?: boolean;
readonly includeInlayFunctionParameterTypeHints?: boolean;
readonly includeInlayVariableTypeHints?: boolean;
readonly includeInlayVariableTypeHintsWhenTypeMatchesName?: boolean;
readonly includeInlayPropertyDeclarationTypeHints?: boolean;
readonly includeInlayFunctionLikeReturnTypeHints?: boolean;
readonly includeInlayEnumMemberValueHints?: boolean;
readonly autoImportFileExcludePatterns?: string[];
/**
* Indicates whether {@link ReferencesResponseItem.lineText} is supported.
*/
readonly disableLineTextInReferences?: boolean;
}
interface CompilerOptions {
allowJs?: boolean;
allowSyntheticDefaultImports?: boolean;
allowUnreachableCode?: boolean;
allowUnusedLabels?: boolean;
alwaysStrict?: boolean;
baseUrl?: string;
charset?: string;
checkJs?: boolean;
declaration?: boolean;
declarationDir?: string;
disableSizeLimit?: boolean;
downlevelIteration?: boolean;
emitBOM?: boolean;
emitDecoratorMetadata?: boolean;
experimentalDecorators?: boolean;
forceConsistentCasingInFileNames?: boolean;
importHelpers?: boolean;
inlineSourceMap?: boolean;
inlineSources?: boolean;
isolatedModules?: boolean;
jsx?: JsxEmit | ts.JsxEmit;
lib?: string[];
locale?: string;
mapRoot?: string;
maxNodeModuleJsDepth?: number;
module?: ModuleKind | ts.ModuleKind;
moduleResolution?: ModuleResolutionKind | ts.ModuleResolutionKind;
newLine?: NewLineKind | ts.NewLineKind;
noEmit?: boolean;
noEmitHelpers?: boolean;
noEmitOnError?: boolean;
noErrorTruncation?: boolean;
noFallthroughCasesInSwitch?: boolean;
noImplicitAny?: boolean;
noImplicitReturns?: boolean;
noImplicitThis?: boolean;
noUnusedLocals?: boolean;
noUnusedParameters?: boolean;
noImplicitUseStrict?: boolean;
noLib?: boolean;
noResolve?: boolean;
out?: string;
outDir?: string;
outFile?: string;
paths?: MapLike<string[]>;
plugins?: PluginImport[];
preserveConstEnums?: boolean;
preserveSymlinks?: boolean;
project?: string;
reactNamespace?: string;
removeComments?: boolean;
references?: ProjectReference[];
rootDir?: string;
rootDirs?: string[];
skipLibCheck?: boolean;
skipDefaultLibCheck?: boolean;
sourceMap?: boolean;
sourceRoot?: string;
strict?: boolean;
strictNullChecks?: boolean;
suppressExcessPropertyErrors?: boolean;
suppressImplicitAnyIndexErrors?: boolean;
useDefineForClassFields?: boolean;
target?: ScriptTarget | ts.ScriptTarget;
traceResolution?: boolean;
resolveJsonModule?: boolean;
types?: string[];
/** Paths used to compute primary types search locations */
typeRoots?: string[];
[option: string]: CompilerOptionsValue | undefined;
}
const enum JsxEmit {
None = "None",
Preserve = "Preserve",
ReactNative = "ReactNative",
React = "React"
}
const enum ModuleKind {
None = "None",
CommonJS = "CommonJS",
AMD = "AMD",
UMD = "UMD",
System = "System",
ES6 = "ES6",
ES2015 = "ES2015",
ESNext = "ESNext"
}
const enum ModuleResolutionKind {
Classic = "Classic",
Node = "Node"
}
const enum NewLineKind {
Crlf = "Crlf",
Lf = "Lf"
}
const enum ScriptTarget {
ES3 = "ES3",
ES5 = "ES5",
ES6 = "ES6",
ES2015 = "ES2015",
ES2016 = "ES2016",
ES2017 = "ES2017",
ES2018 = "ES2018",
ES2019 = "ES2019",
ES2020 = "ES2020",
ES2021 = "ES2021",
ES2022 = "ES2022",
ESNext = "ESNext"
}
const enum ClassificationType {
comment = 1,
identifier = 2,
keyword = 3,
numericLiteral = 4,
operator = 5,
stringLiteral = 6,
regularExpressionLiteral = 7,
whiteSpace = 8,
text = 9,
punctuation = 10,
className = 11,
enumName = 12,
interfaceName = 13,
moduleName = 14,
typeParameterName = 15,
typeAliasName = 16,
parameterName = 17,
docCommentTagName = 18,
jsxOpenTagName = 19,
jsxCloseTagName = 20,
jsxSelfClosingTagName = 21,
jsxAttribute = 22,
jsxText = 23,
jsxAttributeStringLiteralValue = 24,
bigintLiteral = 25
}
}
declare namespace ts.server.protocol {
interface TextInsertion {
newText: string;
/** The position in newText the caret should point to after the insertion. */
caretOffset: number;
}
interface TodoCommentDescriptor {
text: string;
priority: number;
}
interface TodoComment {
descriptor: TodoCommentDescriptor;
message: string;
position: number;
}
enum OutliningSpanKind {
/** Single or multi-line comments */
Comment = "comment",
/** Sections marked by '// #region' and '// #endregion' comments */
Region = "region",
/** Declarations and expressions */
Code = "code",
/** Contiguous blocks of import declarations */
Imports = "imports"
}
enum HighlightSpanKind {
none = "none",
definition = "definition",
reference = "reference",
writtenReference = "writtenReference"
}
enum ScriptElementKind {
unknown = "",
warning = "warning",
/** predefined type (void) or keyword (class) */
keyword = "keyword",
/** top level script node */
scriptElement = "script",
/** module foo {} */
moduleElement = "module",
/** class X {} */
classElement = "class",
/** var x = class X {} */
localClassElement = "local class",
/** interface Y {} */
interfaceElement = "interface",
/** type T = ... */
typeElement = "type",
/** enum E */
enumElement = "enum",
enumMemberElement = "enum member",
/**
* Inside module and script only
* const v = ..
*/
variableElement = "var",
/** Inside function */
localVariableElement = "local var",
/**
* Inside module and script only
* function f() { }
*/
functionElement = "function",
/** Inside function */
localFunctionElement = "local function",
/** class X { [public|private]* foo() {} } */
memberFunctionElement = "method",
/** class X { [public|private]* [get|set] foo:number; } */
memberGetAccessorElement = "getter",
memberSetAccessorElement = "setter",
/**
* class X { [public|private]* foo:number; }
* interface Y { foo:number; }
*/
memberVariableElement = "property",
/** class X { [public|private]* accessor foo: number; } */
memberAccessorVariableElement = "accessor",
/**
* class X { constructor() { } }
* class X { static { } }
*/
constructorImplementationElement = "constructor",
/** interface Y { ():number; } */
callSignatureElement = "call",
/** interface Y { []:number; } */
indexSignatureElement = "index",
/** interface Y { new():Y; } */
constructSignatureElement = "construct",
/** function foo(*Y*: string) */
parameterElement = "parameter",
typeParameterElement = "type parameter",
primitiveType = "primitive type",
label = "label",
alias = "alias",
constElement = "const",
letElement = "let",
directory = "directory",
externalModuleName = "external module name",
/**
* <JsxTagName attribute1 attribute2={0} />
* @deprecated
*/
jsxAttribute = "JSX attribute",
/** String literal */
string = "string",
/** Jsdoc @link: in `{@link C link text}`, the before and after text "{@link " and "}" */
link = "link",
/** Jsdoc @link: in `{@link C link text}`, the entity name "C" */
linkName = "link name",
/** Jsdoc @link: in `{@link C link text}`, the link text "link text" */
linkText = "link text"
}
export interface TypeAcquisition {
/**
* @deprecated typingOptions.enableAutoDiscovery
* Use typeAcquisition.enable instead.
*/
enableAutoDiscovery?: boolean;
enable?: boolean;
include?: string[];
exclude?: string[];
disableFilenameBasedTypeAcquisition?: boolean;
[option: string]: CompilerOptionsValue | undefined;
}
export type CompilerOptionsValue = string | number | boolean | (string | number)[] | string[] | MapLike<string[]> | PluginImport[] | ProjectReference[] | null | undefined;
export interface FileExtensionInfo {
extension: string;
isMixedContent: boolean;
scriptKind?: ScriptKind;
}
/**
* Type of objects whose values are all of the same type.
* The `in` and `for-in` operators can *not* be safely used,
* since `Object.prototype` may be modified by outside code.
*/
interface MapLike<T> {
[index: string]: T;
}
export interface PluginImport {
name: string;
}
export interface ProjectReference {
/** A normalized path on disk */
path: string;
/** The path as the user originally wrote it */
originalPath?: string;
/** True if the output of this reference should be prepended to the output of this project. Only valid for --outFile compilations */
prepend?: boolean;
/** True if it is intended that this reference form a circularity */
circular?: boolean;
}
}
declare namespace ts {
// these types are empty stubs for types from services and should not be used directly
export type EndOfLineState = never;
export type ScriptKind = never;
export type WatchFileKind = never;
export type WatchDirectoryKind = never;
export type PollingWatchKind = never;
export type IndentStyle = never;
export type JsxEmit = never;
export type ModuleKind = never;
export type ModuleResolutionKind = never;
export type NewLineKind = never;
export type ScriptTarget = never;
}
import protocol = ts.server.protocol;
export = protocol;
export as namespace protocol;
/FileCrawler-0.1.8.tar.gz/FileCrawler-0.1.8/filecrawler/filecrawler.py
import codecs
import hashlib
import io
import shutil
import tempfile
from .libs.process import Process
try:
from .config import Configuration
except (ValueError, ImportError) as e:
raise Exception('You may need to run filecrawler from the root directory (which includes README.md)', e)
import sys, datetime, os
from filecrawler.libs.color import Color
from filecrawler.libs.logger import Logger
from .util.tools import Tools
class FileCrawler(object):
def main(self):
''' Either performs action based on arguments, or starts the crawl '''
self.dependency_check()
Configuration.initialize()
if not Configuration.initialized:
return
self.run()
def dependency_check(self):
''' Check that required programs are installed '''
required_apps = []
optional_apps = []
missing_required = False
missing_optional = False
for app in required_apps:
if not Process.exists(app):
missing_required = True
Color.pl('{!} {R}error: required app {O}%s{R} was not found' % app)
for app in optional_apps:
if not Process.exists(app):
missing_optional = True
Color.pl('{!} {O}warning: recommended app {R}%s{O} was not found' % app)
if missing_required:
Color.pl('{!} {R}required app(s) were not found, exiting.{W}')
sys.exit(-1)
if missing_optional:
Color.pl('{!} {O}recommended app(s) were not found')
Color.pl('{!} {O}filecrawler may not work as expected{W}')
def run(self):
try:
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
Logger.pl('{+} {C}Start time {O}%s{W}' % timestamp)
FileCrawler.load_tika()
# Execute the specific actions
Configuration.module.run()
except Exception as e:
Color.pl("\n{!} {R}Error: {O}%s" % str(e))
if Configuration.verbose > 0:
Color.pl('\n{!} {O}Full stack trace below')
from traceback import format_exc
Color.p('\n{!} ')
err = format_exc().strip()
err = err.replace('\n', '\n{W}{!} {W} ')
err = err.replace(' File', '{W}{D}File')
err = err.replace(' Exception: ', '{R}Exception: {O}')
Color.pl(err)
except KeyboardInterrupt as e:
raise e
timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
Logger.pl('{+} {C}End time {O}%s{W}' % timestamp)
Logger.pl(' ')
def print_banner(self):
""" Displays ASCII art of the highest caliber. """
Color.pl(Configuration.get_banner())
@staticmethod
def load_tika():
from .libs.process import Process
import logging
if not Configuration.ocr_enabled:
return
try:
tika_proc = Process.find_process('tika-server.jar')
if tika_proc is not None:
if Configuration.verbose >= 1:
Color.pl('{?} Killing tika server with PID: {G}%s{W}' % tika_proc[0])
Process.kill(pid=tika_proc[0])
if Configuration.verbose >= 1:
Color.pl('{?} Loading tika: {GR}')
p = tempfile.gettempdir()
os.environ["TIKA_PATH"] = p
os.environ["TIKA_LOG_PATH"] = p
os.environ["TIKA_SERVER_JAR"] = 'tika-server.jar'
jar_file = os.path.join(p, 'tika-server.jar')
shutil.copytree(
os.path.join(Configuration.lib_path, 'bin'),
p,
dirs_exist_ok=True)
# create md5 hash file
m = hashlib.md5()
with open(jar_file, 'rb') as f:
binContents = f.read()
m.update(binContents)
with open(jar_file + ".md5", "w") as em:
em.write(m.hexdigest())
file = os.path.join(Configuration.lib_path, 'bin', 'loader.pdf')
import tika
from tika import parser
#Change log level
if Configuration.verbose == 0:
log = logging.getLogger('tika.tika')
log.setLevel(logging.ERROR)
headers = {
"X-Tika-OCRLanguage": f"eng+{Configuration.ocr_language}",
"X-Tika-PDFocrStrategy": Configuration.ocr_pdf_strategy
}
parser.from_file(file, headers=headers)
Color.pl('{+} Tika lib loaded {W}')
finally:
Color.p('{W}')
# Used to suppress libmagic error 'lhs/off overflow 4294967295 0'
# https://bugs.astron.com/view.php?id=426
# This code will suppress any child process stderr output
@staticmethod
def redirect_stderr():
new = os.dup(2) # Create a copy of stderr (new)
sys.stderr = io.TextIOWrapper(os.fdopen(new, 'wb'))
_file = tempfile.TemporaryFile(mode='w+t')
os.dup2(_file.fileno(), 2) # Redirect stderr (fd 2) into the temp file
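# Hedged sketch of the same dup/dup2 idiom, with the restore step that
# redirect_stderr() deliberately omits (FileCrawler silences stderr for its
# whole run). The helper names are illustrative, not part of the package API.
def _silence_stderr_sketch():
    saved = os.dup(2)  # keep a handle on the real stderr
    tmp = tempfile.TemporaryFile(mode='w+t')
    os.dup2(tmp.fileno(), 2)  # fd 2 (stderr) now writes into the temp file
    return saved, tmp

def _restore_stderr_sketch(saved, tmp):
    os.dup2(saved, 2)  # point fd 2 back at the original stderr
    os.close(saved)
    tmp.close()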
def run():
# Explicitly changing the stdout encoding format
if sys.stdout.encoding is None:
# Output is redirected to a file
sys.stdout = codecs.getwriter('latin-1')(sys.stdout)
FileCrawler.redirect_stderr()
o = FileCrawler()
o.print_banner()
try:
o.main()
except Exception as e:
Color.pl('\n{!} {R}Error:{O} %s{W}' % str(e))
if Configuration.verbose > 0:
Color.pl('\n{!} {O}Full stack trace below')
from traceback import format_exc
Color.p('\n{!} ')
err = format_exc().strip()
err = err.replace('\n', '\n{W}{!} {W} ')
err = err.replace(' File', '{W}{D}File')
err = err.replace(' Exception: ', '{R}Exception: {O}')
Color.pl(err)
Color.pl('\n{!} {R}Exiting{W}\n')
Tools.exit_gracefully(1)
except KeyboardInterrupt:
Color.pl('\n{!} {O}interrupted, shutting down...{W}')
Tools.exit_gracefully(2)
Tools.exit_gracefully(0)
/HanabIRC-1.2.05.tar.gz/HanabIRC-1.2.05/hanabIRC/hanabot.py
import logging
import time
import string
import random
import sys
import os
import traceback
from collections import defaultdict
from itertools import chain, islice
from hanabi import Game
from game_history import game_history
from text_markup import irc_markup
from GameResponse import GameResponse
from irc.bot import SingleServerIRCBot
from irc.client import VERSION as irc_client_version
from hanabIRC import __version__
log = logging.getLogger(__name__)
class Hanabot(SingleServerIRCBot):
def __init__(self, server, channels, nick, nick_pass, port, topic, hist_path):
log.debug('new bot started at %s:%d@#%s as %s', server, port,
channels, nick)
SingleServerIRCBot.__init__(
self,
server_list=[(server, port)],
nickname=nick,
realname='Mumford J. Hanabot')
self.nick_pass = nick_pass
self.nick_name = nick
self.topic = topic
# this should be in the config file so that different network
# rate limiting policies can be specified. These defaults
# are tuned to freenode.
self.connection.set_rate_limit(2)
game_history.hist_file = hist_path
# force channels to start with #
self.home_channels = [c if c[0] == '#' else '#%s' % c for c in channels]
log.debug('Home channels: %s' % self.home_channels)
# valid bot commands
self.command_dict = {
'Game Management': ['new', 'delete', 'join', 'start', 'stop',
'leave', 'part', 'option', 'watch'],
'Hand Management': ['move', 'swap', 'sort'],
'Game Action': ['play', 'hint', 'discard'],
'Information': ['help', 'rules', 'turn', 'turns', 'game', 'hints',
'games', 'hands', 'table', 'discardpile', 'version',
'last']
}
self.commands = list()
for cmds in self.command_dict.values():
self.commands += cmds
self.commands_admin = ['die']
# these commands can execute without an active game.
# otherwise the command handlers can assume an active game.
self.no_game_commands = ['new', 'join', 'help', 'rules', 'game', 'games', 'part',
'version', 'last']
# games is a dict indexed by channel name, value is the Game object.
self.games = dict()
# lib IRC callbacks
#############################################################
def get_version(self):
'''raises exception in lib irc, so overload in the bot.'''
return "Python irc.bot 8.0"
def on_nicknameinuse(self, conn, event):
conn.nick(conn.get_nickname() + "_")
def on_welcome(self, conn, event):
if self.nick_pass:
msg = 'IDENTIFY %s %s' % (self.nick_name, self.nick_pass)
self.connection.privmsg('NickServ', msg)
for chan in self.home_channels:
conn.join(chan)
def on_kick(self, conn, event):
time.sleep(1)
conn.join(event.target)
conn.notice(event.target, 'Why I outta....')
def on_join(self, conn, event):
log.debug('got on_join: %s %s', conn, event)
# This is not the proper spot for this. This is when anyone
# joins, not the bot.
# if self.topic:
# self.connection.topic(event.target, self.topic)
def on_privmsg(self, conn, event):
log.debug('got privmsg. %s -> %s', event.source, event.arguments)
# If this is a priv msg, we need to reset the event to look
# like a channel message.
if str(event.target) == self.nick_name:
for chan, game in self.games.iteritems():
if game.in_game(event.source.nick):
event.target = chan
break
else:
# note: this else is on the for() not the if()
msg = ('If you are not in a game (in a channel) I cannot map this private '
'message to a channel or a game, so I have no context to respond. Join '
'a game and try again.')
self._to_nick(event, msg)
return
self.on_pubmsg(conn, event)
def on_pubmsg(self, conn, event):
try:
log.debug('got pubmsg. %s -> %s', event.source, event.arguments)
# messaged commands
a = event.arguments[0].split(':', 1)
if len(a) > 1 and string.lower(a[0]) == string.lower(
self.connection.get_nickname()):
self.parse_commands(event, [a[1].strip()] + event.arguments[1:])
# general channel commands
if len(event.arguments[0]) and event.arguments[0][0] == '!':
log.debug('got channel command: %s', event.arguments[0][1:])
# rebuild the list w/out the ! at start of the first arg
self.parse_commands(event,
[event.arguments[0][1:]] + event.arguments[1:])
except Exception, e:
log.critical('Got exception when handling message: %s' % e)
def on_nick(self, conn, event):
before = event.source.nick
after = event.target
for chan, game in self.games.iteritems():
if game.in_game(before):
if game.replace_player(before, after):
self.connection.notice(chan,
'Replaced %s with %s in game in %s' % (
before, after, chan))
def parse_commands(self, event, cmds):
try:
log.debug('got command. %s --> %s : %s',
event.source.nick, event.target, event.arguments)
nick = event.source.nick
if not cmds:
return ([], 'Giving a command would be more useful.')
# I don't understand when args will ever be more than just a string of
# space separated words - need more IRC lib experience or docs.
cmds = [str(c) for c in cmds[0].split()]
# check for empty command, i.e. !
if not cmds:
return ([], 'Giving a command would be more useful.')
# op only commands - return after executing.
#if cmds[0] in self.commands_admin:
# log.debug('running admin cmd %s', cmds[0])
# for chname, chobj in self.channels.items():
# if nick in chobj.opers():
# if cmds[0] == 'die':
# self.die('Seppuku Successful')
# return
# valid user command check
if not cmds[0] in self.commands:
self._to_nick(event, 'My dearest brother Willis, I do not '
'understand this "%s" of which you speak.' %
' '.join(cmds))
return
# call the appropriate handle_* function.
method = getattr(self, 'handle_%s' % cmds[0], None)
if method:
if not cmds[0] in self.no_game_commands + ['xyzzy']:
if not event.target in self.games:
msg = 'There is no active game in %s! Start one with !new.' % event.target
self._to_chan(event, msg)
return
# invoke it!
method(cmds[1:], event)
# clear possibly ended game after action.
if event.target in self.games:
if self.games[event.target].game_over():
g = self.games[event.target]
game_history.add_game(g.score(), g.players(),
g.game_type(), event.target)
for p in g.players():
self.connection.privmsg('ChanServ', 'devoice %s %s'
% (event.target, p))
del self.games[event.target]
except Exception, e:
exc_type, exc_value, exc_tb = sys.exc_info()
filename, line_num, func_name, text = traceback.extract_tb(exc_tb)[-1]
filename = os.path.basename(filename)
errs = ['Exception in parse_command: %s' % e,
'Error in file %s:%s in %s().' % (filename, line_num, func_name),
'Error text: %s' % text]
self._to_chan(event, 'Does not compute. Unknown error happened. All bets are'
' off about game(s) state. Guru contemplation haiku:')
for err in errs:
log.critical('%s', err)
self._to_chan(event, err)
# some sugar for sending msgs
def _display(self, response, event, notice=False):
'''response is a GameResponse instance. event is an irclib event, which gives us nick and channel.'''
# this whole function should be in a thread. We rate limit
# by sleeping here, which puts all other games on pause,
# which is bad.
if not response:
log.error('Got False response, not displaying output.')
else:
for line in response.public:
if notice:
self.connection.notice(event.target, line)
else:
self.connection.privmsg(event.target, line)
# to user is always a notice.
for nick, lines in response.private.iteritems():
for line in lines:
self.connection.notice(nick, line)
# some sugar for sending msgs
def _to_chan(self, event, msgs):
self._display(GameResponse(msgs), event)
# some sugar for sending strings
def _to_nick(self, event, msgs):
self._display(GameResponse(private={event.source.nick: msgs}), event)
# Game Commands
#############################################################
def handle_help(self, args, event):
log.debug('got help event. args: %s', args)
if not args:
usage = list()
usage.append(
'A game is created via !new, then 2 to 5 people !join the '
'game, and someone calls !start to start the game. Once '
'started, players take turns either !playing a card, '
'!discarding a card, or giving another player a !hint. '
'After a valid !play or !discard the state of the '
'table is shown. The table state can also be seen '
'with the !table command. The turn order is shown with !turns.')
usage.append(
'Players can use !hands to view all hands at the table, '
'including their own. Your own hand is shown with the "backs" '
'facing you. When any card is added to your hand, it is assigned '
'a letter A-E, allowing you to track individual cards as they move around.'
' When a card leaves your hand its letter is assigned to the '
'incoming card. ')
usage.append('You reference your own '
'hands via these letters, e.g. "!play C" or "!discard A". You '
'can arrange your hand via !swap, !sort, and !move.')
usage.append(
'Hints are given by the !hint command. The hint format '
'is "!hint nick color|number". Valid numbers are 1-5; '
'valid colors are white, yellow, red, blue, or green.'
' Example: "!hint xyzzy blue" or "!hint fred 3"')
usage.append(
'The game continues until the deck is empty, all the cards '
'are correcly displayed on the table, or the three storm '
'tokens have been flipped.')
usage.append('To start a new game in a different channel, use !new '
'chan_name. The bot will join that channel and you can start '
'a new game.')
usage.append('!games shows status of all games in all channels.')
usage.append('The bot supports rainbow cards. !help start for details.')
for text, cmds in self.command_dict.iteritems():
usage.append('%s commands: %s' % (text, ', '.join(cmds)))
usage.append('Doing "!help [command]" will give details on that command.')
self._to_nick(event, usage)
return
if args[0] in Hanabot._command_usage:
self._to_nick(event, Hanabot._command_usage[args[0]])
else:
self._to_nick(event, 'No help for topic %s' % args[0])
def handle_hint(self, args, event):
log.debug('got hint event. args: %s', args)
if not self._check_args(args, 2, [str, str], event, 'hint'):
return
# now tell the engine about the !hint
nick = event.source.nick
self._display(self.games[event.target].hint_player(nick, player=args[0], hint=args[1]), event)
def handle_rules(self, args, event):
log.debug('got rules event. args: %s', args)
if not self._check_args(args, 0, [], event, 'rules'):
return
self._to_nick(event, 'Go here for english rules: '
'http://boardgamegeek.com/filepage/85023/english-translation-of-'
'abacusspiele-german-rules')
def _game_state(self, channel):
ret = GameResponse()
log.debug('game_state: chan: %s (%s), games: %s', channel, type(channel), self.games)
if channel not in self.games:
ret.public.append('There is no game being played in %s. '
'Use !new to start one while in %s.' % (channel, channel))
return ret
game = self.games[channel]
state = 'being played' if game.has_started() else 'waiting for players'
if not game.has_started():
if len(game.players()):
ps = game.players()
s = ('Waiting for players in %s. %d players have joined '
'so far: %s.' % (channel, len(ps), ', '.join(ps)))
else:
s = ('Waiting for players in %s, no players have '
'joined yet.' % channel)
else:
turn = game.turn().public[0]
s = ('Game is active in %s and being played by players %s. %s' %
(channel, ', '.join(game.players()), turn))
ret.public.append(s)
return ret
def handle_version(self, args, event):
self._to_chan(event, 'version: %s' % __version__)
def handle_last(self, args, event):
if not len(args):
n = 10
search_str = None
else:
try:
n = int(args[0])
if len(args) >= 2:
search_str = str(' '.join(args[1:]))
else:
search_str = None
except ValueError:
self._to_nick(event, 'Wrong type for argument in !last command.')
self.handle_help(['last'], event)
return
nick = event.source.nick
if n < 0:
self._to_nick(event, 'Why not just ask for the last sqrt(-1) games? Jeeze.')
return
elif n == 0:
self._to_nick(event, 'Nothing happens.')
return
elif n > 20:
self._to_nick(event, 'Giving you the last 20 instead of the unreasonable '
'number you asked for.')
n = 20
self._display(game_history.last_games(nick, n, search_str), event)
def handle_game(self, args, event):
log.debug('got game event. args: %s', args)
if not self._check_args(args, 0, [], event, 'game'):
return
self._display(self._game_state(event.target), event)
def handle_games(self, args, event):
log.debug('got games event. args: %s', args)
if not self._check_args(args, 0, [], event, 'games'):
return
# iterate over all channels the bot is in.
for chan in self.channels.keys():
self._display(self._game_state(str(chan)), event)
def handle_watch(self, args, event):
nick = event.source.nick
self._display(self.games[event.target].add_watcher(nick), event)
def handle_stop(self, args, event):
if not self._check_args(args, 0, [], event, 'stop'):
return
nick = event.source.nick
self._display(self.games[event.target].stop_game(nick), event)
def handle_turn(self, args, event):
if not self._check_args(args, 0, [], event, 'turn'):
return
self._display(self.games[event.target].turn(), event)
def handle_turns(self, args, event):
if not self._check_args(args, 0, [], event, 'turns'):
return
self._display(self.games[event.target].turns(), event)
def handle_table(self, args, event):
log.debug('got table command.')
if not self._check_args(args, 0, [], event, 'table'):
return
self._display(self.games[event.target].get_table(), event)
def handle_discard(self, args, event):
log.debug('got discard event. args: %s', args)
if not self._check_args(args, 1, [str], event, 'discard'):
return
# discard the card and show the response
nick = event.source.nick
self._display(self.games[event.target].discard_card(nick, args[0]), event)
def handle_play(self, args, event):
log.debug('got play event. args: %s', args)
# play the card and show the response
if not self._check_args(args, 1, [str], event, 'play'):
return
nick = event.source.nick
self._display(self.games[event.target].play_card(nick, args[0]), event)
def handle_option(self, args, event):
self._display(self.games[event.target].game_option(args), event,
notice=True)
def handle_hints(self, args, event):
log.debug('got hints event. args: %s', args)
nick = event.source.nick
if len(args) == 1 and args[0] == 'all':
self._display(self.games[event.target].hints(nick, show_all=True), event)
else:
self._display(self.games[event.target].hints(nick), event)
def handle_hands(self, args, event):
''' Show hands of current game. '''
log.debug('got hands event. args: %s', args)
if not self._check_args(args, 0, [], event, 'hands'):
return
nick = event.source.nick
self._display(self.games[event.target].get_hands(nick), event)
def handle_xyzzy(self, args, event):
self._to_nick(event, 'Nothing happens.')
def handle_new(self, args, event):
''' Create a new game. '''
log.debug('got new game event')
if len(args) == 1:
chan = args[0] if args[0][0] == '#' else '#' + args[0]
self.connection.join(chan)
self._to_chan(event, 'Hanabot joined channel %s. /join %s and !new '
'to begin game there.' % (chan, chan))
return
if not self._check_args(args, 0, [], event, 'new'):
return
nick = event.source.nick
if event.target in self.games:
self._to_nick(event, 'There is already an active game in the channel.')
return
log.info('Starting new game.')
self.games[event.target] = Game()
self._display(GameResponse('New game started by %s. Accepting joins.' % nick),
event, notice=True)
m = irc_markup()
name = ''
for i, c in enumerate('Hanabi'):
name += m.color(c, m.Colors[i % len(m.Colors)])
msg = 'New game of %s starting in channel %s.' % (name, event.target)
for chan in self.home_channels:
log.debug('game notification sent to %s: %s', event.target, msg)
self.connection.notice(chan, msg)
def handle_join(self, args, event):
'''join a game, if one is active.'''
log.debug('got join event')
if not self._check_args(args, 0, [], event, 'join'):
return
chan = event.target
nick = event.source.nick
# # enforce one game per player. Will not be needed if force users to
# # send commands from the channel, so I can key the game to the channel.
# for chan, g in self.games.iteritems():
# if nick in g.players():
# msg = ('You are already in a game in %s. One game per nick '
# 'per channel please.' % chan)
# self._to_nick(event, msg)
# return
if not chan in self.games:
self._to_chan(event, 'There is no game started in %s, create one '
'with !new' % chan)
return
if not self.channels[chan].is_voiced(nick):
self.connection.privmsg('ChanServ', 'voice %s %s' % (chan, nick))
self._display(self.games[chan].add_player(nick), event)
# GTL TODO: make sure this is called when a player leaves the channel?
def handle_leave(self, args, event):
'''leave an active game.'''
log.debug('got leave event. args: %s', args)
if not self._check_args(args, 0, [], event, 'leave'):
return
nick = event.source.nick
chan = event.target
self.connection.privmsg('ChanServ', 'devoice %s %s' % (chan, nick))
# remove the player and display the result
self._display(self.games[event.target].remove_player(nick), event)
def handle_sort(self, args, event):
'''arg format: []'''
log.debug('got handle_sort event. args: %s', args)
if not self._check_args(args, 0, [], event, 'sort'):
return
nick = event.source.nick
self._display(self.games[event.target].sort_cards(nick), event)
def handle_move(self, args, event):
'''arg format: cardX slotN.'''
log.debug('got handle_move event. args: %s', args)
if not self._check_args(args, 2, [str, int], event, 'move'):
return
nick = event.source.nick
self._display(self.games[event.target].move_card(nick, args[0], args[1]), event)
def handle_swap(self, args, event):
'''arg format: cardA cardB.'''
log.debug('got handle_swap event. args: %s', args)
if not self._check_args(args, 2, [str, str], event, 'swap'):
return
# do the swap
nick = event.source.nick
self._display(self.games[event.target].swap_cards(nick, args[0], args[1]), event)
def handle_start(self, args, event):
log.debug('got start event')
opts = dict()
if len(args):
for a in args:
opts[a] = True
elif not self._check_args(args, 0, [], event, 'start'):
return
nick = event.source.nick
opts = opts if len(opts) else None
self._display(self.games[event.target].start_game(nick, opts), event)
def handle_part(self, args, event):
log.debug('got part event')
if not self._check_args(args, 0, [], event, 'part'):
return
if not event.target in self.home_channels:
self._to_chan(event, 'Hanabot leaving channel.')
self.connection.part(event.target)
else:
self._to_chan(event, 'Hanabot refuses to leave home channel. Nice try.')
def handle_delete(self, args, event):
log.debug('got delete event')
if not self._check_args(args, 0, [], event, 'delete'):
return
for p in self.games[event.target].players():
self.connection.privmsg('ChanServ', 'devoice %s %s' % (event.target, p))
del self.games[event.target]
self._to_chan(event, '%s deleted game.' % event.source.nick)
def handle_discardpile(self, args, event):
log.debug('got discardpile event')
if not self._check_args(args, 0, [], event, 'discardpile'):
return
nick = event.source.nick
self._display(self.games[event.target].get_discard_pile(nick), event)
def _check_args(self, args, num, types, event, cmd):
'''Check the given arguments for correct types and number. Show error
message and help to nick on error and return False. Else return True.
As a side effect, set the types correctly. e.g. "1.2" is set to 1.2.'''
if len(args) != num:
self._to_nick(event, 'Wrong number of arguments to %s' % cmd)
self.handle_help([cmd], event)
return False
elif len(types) != num:
# This is an internal callee error, so no message.
log.info('internal error in _check_args: wrong number of types passed in.')
return False
for i in xrange(len(args)):
try:
args[i] = types[i](args[i])
except ValueError:
self._to_nick(event, 'Wrong type for argument %s in command %s.' % (args[i], cmd))
self.handle_help([cmd], event)
return False
return True
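# Illustrative sketch (comments only, nothing executed): the in-place
# coercion _check_args performs for, e.g., '!move B 3' dispatched as
# handle_move(['B', '3'], event):
#
#     args, types = ['B', '3'], [str, int]
#     for i in xrange(len(args)):
#         args[i] = types[i](args[i])
#     # args is now ['B', 3]; a bad count or type would instead notice
#     # the nick with usage help and make _check_args return False.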
####### static class data
_command_usage = {
'new': '!new [channel] - create a new game. If channel is given, hanabot will join that channel. (Then use !new in that channel to create a new game there.)',
'delete': '!delete - delete a game. Deleted games are not added to game history.',
'join': '!join - join a game. If not game in channel, use !new to create one.',
'start': '!start [rainbow_5 | rainbow_10] - start a game. The game must have at least two players. If rainbow_5 is given, 5 rainbow cards will be added to the deck. If rainbow_10 is given, 10 rainbow cards will be added.',
'stop': '!stop - immediately score the game, then stop/kill it.',
'leave': '!leave - leave a game. If you are player, this is bad form. If you are watching the game (via !watch) you will no longer receive hand updates.',
'part': '!part - tell Hanabot to part the channel. Note: Hanabot will not leave its home channel.',
'move': '!move card index - move a card in your hand and slide all other cards "right". "card" must be one of A, B, C, D, or E. "index" is where to put the card, counting from the left and must be an integer between 1 and max hand size.',
'swap': '!swap card card - swap cards in your hand. Card arguments must be one of A, B, C, D, or E.',
'sort': '!sort - sort your cards into "correct" order, i.e. into ABCDE order from "mixed" state.',
'play': '!play card - play the card to the table. "card" must be one of A, B, C, D, or E.',
'hint': '!hint nick color|number - give a hint to a player about which color or number cards are in their hand. Valid colors: red, blue, white, green, yellow (or r, b, w, g, y) (case insensitive); valid numbers are 1, 2, 3, 4, or 5. Example "!hint frobozz blue" and "!hint plugh 4"',
'discard': '!discard card - place a card in the discard pile. "card" must be one of A, B, C, D, or E.',
'help': 'Infinite recursion detected. Universe is rebooting...',
'rules': '!rules - show URL for (english) Hanabi rules.',
'turn': '!turn - show which players turn it is.',
'turns': '!turns - show turn order in current play ordering.',
'game': '!game - show the game state for current channel.',
'games': '!games - show game states for all channels hanabot has joined.',
'hints': '!hints [all] - show the hints given in the current game. If "all" is given, show all hints otherwise show only hints given to you.',
'last': '!last [n [filter]] - Show the results of the last N games. If n not given, then show results for the last 10 games. If [filter] is given, filter the list by the string given.',
'option': '!option [opt1 opt2 ... ] - If no arguments given, list current game options. Otherwise set the options given.',
'hands': '!hands - show hands of players. Your own hand will be shown with the "backs" facing you, identified individually by a letter. When a card is removed the letter is reused for the new card.',
'table': '!table - show the state of the table',
'watch': '!watch - join the game as a spectator. This means you get notices of hands after a move.',
'discardpile': '!discardpile - show the current discard pile.',
'grue': 'You are likely to be eaten.',
'version': 'Show the version of the bot.',
}
if __name__ == "__main__":
# Currently there is no doctest here. This is for catching syntax errors.
import doctest
doctest.testmod()
/HiNT-Package-2.2.8.tar.gz/HiNT-Package-2.2.8/HiNT/getGenomeRowSumsFromHiC.py
import numpy as np
from HiNT.straw import *
import os,sys
def get_chromInfo(chromlf):
chroms = []
infos = {}
inf = open(chromlf)
for line in inf:
line = line.strip().split('\t')
infos[line[0]] = int(line[1])
chroms.append(line[0])
return chroms,infos
def getSumPerChrom(i, j, hicfile, binsize, chroms, chromInfo, sumInfo):
chrom1 = chroms[i] #the primary chromosome
chrom2 = chroms[j] #the supplementary chromosome
chr1 = chrom1.lstrip('chr')
chr2 = chrom2.lstrip('chr')
result = straw('NONE', hicfile, str(chr1), str(chr2), 'BP', binsize)
#x is the coordinate for i, and y is the coordinate for j
if i < j:
xs = np.divide(result[0],binsize)
ys = np.divide(result[1],binsize)
else:
xs = np.divide(result[1],binsize)
ys = np.divide(result[0],binsize)
values = np.array(result[2])
chrom1length = chromInfo[chrom1]
for n in sumInfo:
if i != j:
idx = np.where(xs == n)
else:
idx1 = np.where(xs == n)
idx2 = np.where(ys == n)
idx = np.unique(np.concatenate((idx1,idx2), axis=1))
nvalues = values[idx]
nsum = np.nansum(nvalues)
sumInfo[n] += nsum
return sumInfo
def writeGenomeRowSums(sumInfo,outputname,name,baseIdx):
outf = open(outputname,'w')
header=['',name]
outf.write('\t'.join(header) + '\n')
binsIdx = list(sumInfo.keys())
binsIdx.sort()
for idx in binsIdx:
newidx = idx + baseIdx
res = [str(newidx), str(sumInfo[idx])]
outf.write('\t'.join(res) + '\n')
outf.close()
lastIdx = binsIdx[-1] + baseIdx
return lastIdx
def getGenomeRowSums(resolution, hicfile, chromlf, outputdir,name):
rowSumFilesInfo = {}
chroms,chromInfo = get_chromInfo(chromlf)
binsize = resolution * 1000
baseIdx = 0
for i in range(len(chroms)-2):
sumInfo = {}
chrom1length = chromInfo[chroms[i]]
binnumber = int(chrom1length/binsize) + 1
for n in range(binnumber):
sumInfo[n] = 0
for j in range(len(chroms)-2):
sumInfo = getSumPerChrom(i, j, hicfile, binsize, chroms, chromInfo, sumInfo)
outputname = os.path.join(outputdir,name + '_%s_%skb_GenomeRowSums.txt'%(chroms[i],str(resolution)))
writeGenomeRowSums(sumInfo,outputname,name,baseIdx)
rowSumFilesInfo[chroms[i]] = outputname
return rowSumFilesInfo
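# Hedged usage sketch -- the file names below are placeholders, not files
# shipped with HiNT. chromlf is the two-column chrom<TAB>length file that
# get_chromInfo() expects; resolution is in kb (binsize = resolution * 1000).
if __name__ == '__main__':
    rowSumFiles = getGenomeRowSums(resolution=50,
                                   hicfile='sample.hic',
                                   chromlf='hg19.chrom.sizes',
                                   outputdir='.',
                                   name='sample')
    for chrom in rowSumFiles:
        print('%s -> %s' % (chrom, rowSumFiles[chrom]))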
/JumpScale-core-6.0.0.tar.gz/JumpScale-core-6.0.0/lib/JumpScale/core/baseclasses/BaseType.py
from JumpScale import j
from JumpScale.core.pmtypes.base import BaseType as TypeBaseType
def generate_init_properties(cls, attrs):
'''Generate a class __init_properties__ method
@param cls: Type to generate method for
@type cls: type
@param attrs: Class construction attributes
@type attrs: dict
@returns: __init_properties__ method
@rtype: method
'''
def __init_properties__(self):
'''Initialize all properties with their default value'''
# Call superclass __init_properties__, if any. No-op otherwise
base = super(cls, self)
if hasattr(base, '__init_properties__'):
base.__init_properties__()
for name, attr in (p for p in attrs.iteritems() \
if isinstance(p[1], TypeBaseType)):
value = attr.get_default(self)
setattr(self, attr.attribute_name, value)
return __init_properties__
class BaseTypeMeta(type):
'''Meta class for all BaseTypes, makes sure we know the name of descriptor attributes'''
def __new__(cls, name, bases, attrs):
t = type.__new__(cls, name, bases, attrs)
try:
#If this *is* 'BaseType' itself we don't want to do anything special with it
#This raises a NameError if BaseType is not 'known' yet
BaseType
except NameError:
return t
# Store attribute name on BaseType attributes
for name, value in (p for p in attrs.iteritems() \
if isinstance(p[1], TypeBaseType)):
value._PM_NAME = name
#Generate __init_properties__
ip = generate_init_properties(t, attrs)
setattr(t, '__init_properties__', ip)
property_metadata = dict()
for base in bases:
property_metadata.update(
getattr(base, 'pm_property_metadata', dict()))
for name, value in (p for p in attrs.iteritems() \
if isinstance(p[1], TypeBaseType)):
property_metadata[name] = value.constructor_args
setattr(t, 'pm_property_metadata', property_metadata)
return t
class BaseType(object):
__metaclass__ = BaseTypeMeta
def __init__(self):
"""
Initialize basetype
During initialization all pmtype properties are set to their default
values. This is only done when an object is created for the first time,
otherwise the property values would be overwritten when e.g. restoring
an object from the cmdb.
"""
if not hasattr(self, '_pm__initialized'):
self.__init_properties__()
self._pm__initialized = True
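# Hedged, illustration-only sketch of the mechanism above, kept as comments
# so the module's behavior is unchanged. A real property must subclass
# TypeBaseType (from JumpScale.core.pmtypes.base) for BaseTypeMeta and
# __init_properties__ to recognize it; 'WatchdogTimeout' is invented here.
#
# class Machine(BaseType):
#     # a TypeBaseType descriptor with a get_default(); the metaclass
#     # stamps _PM_NAME = 'timeout' and records its constructor_args in
#     # Machine.pm_property_metadata['timeout']
#     timeout = WatchdogTimeout(default=60)
#
# m = Machine()   # BaseType.__init__ calls the generated __init_properties__,
#                 # which stores the descriptor's default on the instance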
/ELLIPTIc-1.0.1-py3-none-any.whl/elliptic/Kernel/Context.py
from collections import defaultdict
from typing import Dict, List, Any, Union, Iterable
from abc import ABC, abstractmethod
class ContextException(Exception):
"""Exception raised when an error related to a Context operation occurs.
"""
class Context:
"""Defines a context for code generation.
A Context is basically a dictionary of stacks. In other words, it defines
a `stack_name -> stack` mapping. Each `stack` has is semantically defined by its
`stack_name`.
Example:
>>> context = Context()
>>> context.put_value('current_value', '100')
>>> context.put_value('current_value', '200')
>>> context.get_value('current_value') # 200
>>> context.pop_value('current_value')
>>> context.get_value('current_value') # 100
"""
def __init__(self) -> None:
self.context: Dict[str, List[Any]] = defaultdict(list)
def put_value(self, name: str, value: Union[str, Iterable[str]]) -> None:
"""Pushes the value `value` to a stack named `name`.
Parameters:
name: Stack name.
value: Value to be pushed into the stack.
"""
self.context[name].append(value)
def get_value(self, name: str) -> str:
"""Gets the front value of the stack named `name`.
Parameters:
name: Stack name.
"""
try:
return self.context[name][-1]
except IndexError:
raise ContextException(f"Name {name} does not exist in the context.")
def pop_value(self, name: str) -> None:
"""Pops the front value of the stack named `name`.
Parameters:
name: Stack name.
"""
try:
self.context[name].pop()
except IndexError:
raise ContextException(f"Name {name} does not exist in the context.")
def clear_values(self, name: str) -> None:
"""Clears the stack named `name`.
Parameters:
name: Stack name.
"""
self.context[name].clear()
class ContextDelegate(ABC):
"""Delegate class for getting the generated code template file and its kwargs for a
given expression.
Also defines the context state changes when the corresponding expression node is visited
and exited.
Attributes:
context: Context instance.
unique_id: A unique id that can be used to identify values that were created from this context delegate.
Parameters:
context: Context instance.
unique_id: A unique id.
"""
def __init__(self, context: Context, unique_id: int):
self.context: Context = context
self.unique_id: int = unique_id
self.child = ""
@abstractmethod
def get_template_file(self) -> str:
"""Returns the template file containing the generated code for the expression.
"""
raise NotImplementedError
@abstractmethod
def template_kwargs(self) -> Dict[str, Any]:
"""Returns the arguments (a dictionary) that will be passed to the template.
"""
raise NotImplementedError
@abstractmethod
def context_enter(self) -> None:
"""Modifies the context state. Called when the expression node is visited.
Use this method to prepare the context for expressions that will be visited
afterwards. It is preferable to keep most `context.put_value` calls in this method.
"""
raise NotImplementedError
@abstractmethod
def context_exit(self) -> None:
"""Modifies the context state. Called when the expression node is exited.
Use this method to clear values from the context and prepare it for the
expression nodes that were visited before. It is preferable to keep most
`context.pop_value` calls in this method.
"""
raise NotImplementedError | PypiClean |
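# Hedged sketch of a concrete delegate. The template path and the
# 'iter_name' context key are invented for illustration; real delegates are
# supplied by each ELLIPTIc backend, not by this module.
class ExampleLoopDelegate(ContextDelegate):
    """Illustrative delegate that exposes a loop-variable name to children."""

    def get_template_file(self) -> str:
        return 'templates/loop.etp'  # hypothetical template file

    def template_kwargs(self) -> Dict[str, Any]:
        return {'iter_name': self.context.get_value('iter_name'),
                'child': self.child}

    def context_enter(self) -> None:
        # Visited: publish a unique loop-variable name for nested expressions.
        self.context.put_value('iter_name', f'it{self.unique_id}')

    def context_exit(self) -> None:
        # Exited: restore the context for previously visited nodes.
        self.context.pop_value('iter_name')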