| field | dtype | range / classes |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k to 681M, nullable (⌀) |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 to 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 to 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
52a5fc44063f2e73239719204651a2f2b4b3e5e5 | 767b5482f3c5b9c2c85575c711e37561f5b8f198 | /engine/engine_lib/encoderlib.py | 27d186e1e4d625fe001279e1c8110f2ff708818f | [] | no_license | zhupite233/scaner | 8e39c903f295d06195be20067043087ec8baac4f | 7c29c02bca2247a82bcbb91cc86955cc27998c95 | refs/heads/master | 2020-05-18T03:23:03.459222 | 2019-04-15T04:29:10 | 2019-04-15T04:29:10 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,332 | py | #!/usr/bin/env python
"""
This is the encoding / decoding functions collection for DharmaEncoder. It
allows you to encode and decode various data formats.
(c) 2010 Nathan Hamiel
Email: nathan{at}neohaxor{dot}org
Hexsec Labs: http://hexsec.com/labs
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib
import hashlib
import cgi
import StringIO
import zlib
import decimal
from xml.sax.saxutils import unescape
from xml.sax.saxutils import escape
###################
# Encoder section #
###################
def url_encode(encvalue):
""" URL encode the specifed value. Example Format: Hello%20World """
try:
encoded_value = urllib.quote(encvalue)
except:
encoded_value = "There was a problem with the specified value"
return(encoded_value)
def full_url_encode(encvalue):
""" Full URL Hex encode the specified value.
Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "%")
hexval += val
return(hexval)
def base64_encode(encvalue):
""" Base64 encode the specified value. Example Format: SGVsbG8gV29ybGQ= """
try:
basedata = encvalue.encode("Base64")
except:
basedata = "There was an error"
return(basedata)
# def html_entity_encode(encvalue):
# """ Encode value using HTML entities. Example Format: """
#####
# Follow up on this. It needs to be fixed
#####
# encoded_value = cgi.escape(encvalue)
# return(encoded_value)
def hex_encode(encvalue):
""" Encode value to Hex. Example Format: 48656c6c6f2576f726c64"""
hexval = ""
for item in encvalue:
        val = "%02x" % ord(item)  # zero-padded hex; strip("0x") would also remove legitimate leading/trailing "0" and "x" characters
hexval += val
return(hexval)
def hex_entity_encode(encvalue):
""" Encode value to a Hex entitiy. Example Format: Hello"""
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "&#x") + ";"
hexval += val
return(hexval)
def unicode_encode(encvalue):
""" Unicode encode the specified value in the %u00 format. Example:
%u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064 """
hexval = ""
for item in encvalue:
val = hex(ord(item)).replace("0x", "%u00")
hexval += val
return(hexval)
def escape_xml(encvalue):
""" Escape the specified HTML/XML value. Example Format: Hello&World """
escaped = escape(encvalue, {"'": "'", '"': """})
return(escaped)
def md5_hash(encvalue):
""" md5 hash the specified value.
Example Format: b10a8db164e0754105b7a99be72e3fe5"""
hashdata = hashlib.md5(encvalue).hexdigest()
return(hashdata)
def sha1_hash(encvalue):
""" sha1 hash the specified value.
Example Format: 0a4d55a8d778e5022fab701977c5d840bbc486d0 """
hashdata = hashlib.sha1(encvalue).hexdigest()
return(hashdata)
def sqlchar_encode(encvalue):
""" SQL char encode the specified value.
Example Format: CHAR(72)+CHAR(101)+CHAR(108)+CHAR(108)+CHAR(111)"""
charstring = ""
for item in encvalue:
val = "CHAR(" + str(ord(item)) + ")+"
charstring += val
return(charstring.rstrip("+"))
####
# oraclechr_encode not tested yet, but should work
####
def oraclechr_encode(encvalue):
""" Oracle chr encode the specified value. """
charstring = ""
for item in encvalue:
val = "chr(" + str(ord(item)) + ")||"
charstring += val
return(charstring.rstrip("||"))
def decimal_convert(encvalue):
""" Convert input to decimal value.
Example Format: 721011081081113287111114108100 """
decvalue = ""
for item in encvalue:
decvalue += str(ord(item))
return(decvalue)
def decimal_entity_encode(encvalue):
""" Convert input to a decimal entity.
    Example Format: &#72;&#101;&#108;&#108;&#111;&#32;&#87;&#111;&#114;&#108;&#100; """
decvalue = ""
for item in encvalue:
decvalue += "&#" + str(ord(item)) +";"
return(decvalue)
def rot13_encode(encvalue):
""" Perform ROT13 encoding on the specified value.
Example Format: Uryyb Jbeyq """
return(encvalue.encode("rot13"))
###################
# Decoder section #
###################
def url_decode(decvalue):
""" URL Decode the specified value. Example Format: Hello%20World """
returnval = urllib.unquote(decvalue)
return(returnval)
def fullurl_decode(decvalue):
""" Full URL decode the specified value.
Example Format: %48%65%6c%6c%6f%20%57%6f%72%6c%64 """
splithex = decvalue.split("%")
hexdec = ""
for item in splithex:
if item != "":
hexdec += chr(int(item, 16))
return(hexdec)
def base64_decode(decvalue):
""" Base64 decode the specified value.
Example Format: SGVsbG8gV29ybGQ= """
msg = """ There was an error. Most likely this isn't a valid Base64 value
and Python choked on it """
try:
base64dec = decvalue.decode("Base64")
return(base64dec)
except:
return(msg)
def hex_decode(decvalue):
""" Hex decode the specified value.
    Example Format: 48656c6c6f20576f726c64 """
msg = """ There was an error, perhaps an invalid length for the hex
value """
try:
decodeval = decvalue.decode("hex")
return(decodeval)
except:
return(msg)
def hexentity_decode(decvalue):
""" Hex entity decode the specified value.
    Example Format: &#x48;&#x65;&#x6c;&#x6c;&#x6f; """
charval = ""
splithex = decvalue.split(";")
for item in splithex:
# Necessary because split creates an empty "" that tries to be
# converted with int()
if item != "":
hexcon = item.replace("&#", "0")
charcon = chr(int(hexcon, 16))
charval += charcon
else:
pass
return(charval)
def unescape_xml(decvalue):
""" Unescape the specified HTML or XML value: Hel啊lo&World"""
unescaped = unescape(decvalue, {"'": "'", """: '"'})
return(unescaped)
def unicode_decode(decvalue):
""" Unicode decode the specified value %u00 format.
Example Format: %u0048%u0065%u006c%u006c%u006f%u0020%u0057%u006f%u0072%u006c%u0064 """
charval = ""
splithex = decvalue.split("%u00")
for item in splithex:
if item != "":
hexcon = item.replace("%u00", "0")
charcon = chr(int(hexcon, 16))
charval += charcon
else:
pass
return(charval)
def rot13_decode(decvalue):
""" ROT13 decode the specified value. Example Format: Uryyb Jbeyq
    ROT13 rotates each letter by 13 positions, so applying the same encoding twice returns the original text.
"""
return(decvalue.decode("rot13"))
| [
"[email protected]"
] | |
e6d4a5b68241ef8bf821e322cb11bd1f31db75b6 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/rci.py | 1499f78fdcb23fcbcc72afecd718862922797f9e | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'rCI':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
2e3138b7aebe9b0d818303c674da9144988dee2d | 2b0eab74af8d23244ff11699830f9bb10fbd717a | /helpers/mixins/unpack_tags_mixin.py | 5e6e4c11c733fc5368427ac90ddb23bf2e781302 | [] | no_license | alexandrenorman/mixeur | c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b | 95d21cd6036a99c5f399b700a5426e9e2e17e878 | refs/heads/main | 2023-03-13T23:50:11.800627 | 2021-03-07T15:49:15 | 2021-03-07T15:49:15 | 345,384,858 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,277 | py | from .unpack_ids_mixin import UnpackIdsMixin
class UnpackTagsMixin(UnpackIdsMixin):
"""
    Mixin to apply to a ModelViewSet which transforms registered fields from a string of ids into a list of objects
"1,2,3" => [<Obj id=1>, <Obj id=2>, <Obj id=3>]
    If a non-numeric string is passed, a new instance of the given model is created with the given model name field
"1,2,truc" => [<Obj id=1 name=...>, <Obj id=2 name=...>, <new Obj id=3 name="truc">]
Should define unpackable fields like this :
unpackable_fields = {'data_field_name': (ModelName, 'model_field_name')}
"""
def get_item_id(self, word, options):
"""
        If the given tag contains only digits, use it as an id; otherwise create the instance
"""
item_id = None
if word.isdigit():
item_id = int(word)
elif options:
tag_model, tag_model_field = options
existing_tag = tag_model.objects.filter(**{tag_model_field: word}).first()
if existing_tag:
item_id = existing_tag.id
elif word != "":
item_id = tag_model.objects.create(**{tag_model_field: word}).id
else:
return {"id": None}
if item_id is not None:
return {"id": item_id}
| [
"[email protected]"
] | |
05af6eb6e60b4748045485fcbf36d751acf72583 | 0c7ff0ec35ba2bb38f99ef6ecb261ec33466dd52 | /Day1/day1Project.py | 2d1e56254a4ef4fd53ab5a15fdd51db183e510ec | [] | no_license | TheKinshu/100-Days-Python | 15cbacc608ee349cc9733a7032e10a359bebb731 | 293ad6b3e5f5208da84efbc5b2d2d395a5a53421 | refs/heads/master | 2023-04-18T08:21:30.361800 | 2021-05-02T18:48:39 | 2021-05-02T18:48:39 | 351,582,416 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | #1. Create a greeting for your program.
print("Welcome to the Band Name Generator.")
#2. Ask the user for the city that they grew up in.
city = input("What's name of the city you gre up in?\n")
#3. Ask the user for the name of a pet.
pet = input("What's your pet's name?\n")
#4. Combine the name of their city and pet and show them their band name.
print("Your band name could be " + city + " " + pet)
#5. Make sure the input cursor shows on a new line, see the example at:
# https://band-name-generator-end.appbrewery.repl.run/ | [
"[email protected]"
] | |
d2534e7f9ed2539c6ec7228c87061771a60c4676 | 1d11288ec1a5d98dcf66c4ca45072ffd29901de0 | /mrp_extend/models/mrp_bom_line.py | 0731280072097855fc742fa848452a84c7f6fb29 | [] | no_license | pyrun13/addons | 14202e273c802cee391a68474a6bdc7cf062b25c | b81650d81e0a227dd4fc460846e53ce5e61a8cc1 | refs/heads/master | 2020-09-07T21:48:18.673226 | 2019-11-12T16:15:06 | 2019-11-12T16:15:06 | 220,921,425 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 412 | py | from odoo import models, fields, api, exceptions
class MrpBomLine(models.Model):
_inherit = 'mrp.bom.line'
attrition_rate = fields.Float(string='损耗率(%)')
def write(self, vals):
attrition_rate = vals.get('attrition_rate', 0)
if attrition_rate < 0:
raise exceptions.ValidationError('损耗率不能为负数!')
return super(MrpBomLine, self).write(vals)
| [
"[email protected]"
] | |
8c59ff3068e701a47f55427121fb4d45c93db56c | 649e2af15011b3c6326436e91a9dd9af0c3a6f8f | /vnpy/app/spread_trading/engine.py | 0a6901795c79ebaf15b64c56c62d0f2272d57e13 | [
"MIT"
] | permissive | Loopring/vnpy | 6270662260c2fdbeed846f0370d1b5eecea7c7bf | f7945b23e29dab8bfdf064da6a6cb815bb755b17 | refs/heads/loopring-release | 2023-07-16T23:11:10.174728 | 2021-09-06T04:01:00 | 2021-09-06T04:01:00 | 277,985,227 | 21 | 6 | MIT | 2021-01-23T02:21:08 | 2020-07-08T03:59:17 | Python | UTF-8 | Python | false | false | 31,837 | py | import traceback
import importlib
import os
from typing import List, Dict, Set, Callable, Any, Type
from collections import defaultdict
from copy import copy
from pathlib import Path
from datetime import datetime, timedelta
from vnpy.event import EventEngine, Event
from vnpy.trader.engine import BaseEngine, MainEngine
from vnpy.trader.event import (
EVENT_TICK, EVENT_POSITION, EVENT_CONTRACT,
EVENT_ORDER, EVENT_TRADE, EVENT_TIMER
)
from vnpy.trader.utility import load_json, save_json
from vnpy.trader.object import (
TickData, ContractData, LogData,
SubscribeRequest, OrderRequest
)
from vnpy.trader.constant import (
Direction, Offset, OrderType, Interval
)
from vnpy.trader.converter import OffsetConverter
from .base import (
LegData, SpreadData,
EVENT_SPREAD_DATA, EVENT_SPREAD_POS,
EVENT_SPREAD_ALGO, EVENT_SPREAD_LOG,
EVENT_SPREAD_STRATEGY,
load_bar_data, load_tick_data
)
from .template import SpreadAlgoTemplate, SpreadStrategyTemplate
from .algo import SpreadTakerAlgo
APP_NAME = "SpreadTrading"
class SpreadEngine(BaseEngine):
""""""
def __init__(self, main_engine: MainEngine, event_engine: EventEngine):
"""Constructor"""
super().__init__(main_engine, event_engine, APP_NAME)
self.active = False
self.data_engine: SpreadDataEngine = SpreadDataEngine(self)
self.algo_engine: SpreadAlgoEngine = SpreadAlgoEngine(self)
self.strategy_engine: SpreadStrategyEngine = SpreadStrategyEngine(self)
self.add_spread = self.data_engine.add_spread
self.remove_spread = self.data_engine.remove_spread
self.get_spread = self.data_engine.get_spread
self.get_all_spreads = self.data_engine.get_all_spreads
self.start_algo = self.algo_engine.start_algo
self.stop_algo = self.algo_engine.stop_algo
def start(self):
""""""
if self.active:
return
self.active = True
self.data_engine.start()
self.algo_engine.start()
self.strategy_engine.start()
def stop(self):
""""""
self.data_engine.stop()
self.algo_engine.stop()
self.strategy_engine.stop()
def write_log(self, msg: str):
""""""
log = LogData(
msg=msg,
gateway_name=APP_NAME
)
event = Event(EVENT_SPREAD_LOG, log)
self.event_engine.put(event)
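# ----------------------------------------------------------------------
# Illustrative wiring sketch (not part of the original vnpy module): how
# SpreadEngine might be attached to a MainEngine/EventEngine pair and a
# two-leg spread registered. The symbols and multipliers below are made-up
# example values; add_spread's arguments mirror the leg_settings structure
# read by SpreadDataEngine.load_setting further down.
#
#   from vnpy.event import EventEngine
#   from vnpy.trader.engine import MainEngine
#
#   event_engine = EventEngine()
#   main_engine = MainEngine(event_engine)
#   spread_engine = SpreadEngine(main_engine, event_engine)
#   spread_engine.start()
#   spread_engine.add_spread(
#       name="demo_spread",
#       leg_settings=[
#           {"vt_symbol": "LEG1.EXCHANGE", "price_multiplier": 1, "trading_multiplier": 1},
#           {"vt_symbol": "LEG2.EXCHANGE", "price_multiplier": -1, "trading_multiplier": -1},
#       ],
#       active_symbol="LEG1.EXCHANGE",
#       min_volume=1,
#   )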
class SpreadDataEngine:
""""""
setting_filename = "spread_trading_setting.json"
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
self.legs: Dict[str, LegData] = {} # vt_symbol: leg
self.spreads: Dict[str, SpreadData] = {} # name: spread
self.symbol_spread_map: Dict[str, List[SpreadData]] = defaultdict(list)
def start(self):
""""""
self.load_setting()
self.register_event()
self.write_log("价差数据引擎启动成功")
def stop(self):
""""""
pass
def load_setting(self) -> None:
""""""
setting = load_json(self.setting_filename)
for spread_setting in setting:
self.add_spread(
spread_setting["name"],
spread_setting["leg_settings"],
spread_setting["active_symbol"],
spread_setting.get("min_volume", 1),
save=False
)
def save_setting(self) -> None:
""""""
setting = []
for spread in self.spreads.values():
leg_settings = []
for leg in spread.legs.values():
price_multiplier = spread.price_multipliers[leg.vt_symbol]
trading_multiplier = spread.trading_multipliers[leg.vt_symbol]
inverse_contract = spread.inverse_contracts[leg.vt_symbol]
leg_setting = {
"vt_symbol": leg.vt_symbol,
"price_multiplier": price_multiplier,
"trading_multiplier": trading_multiplier,
"inverse_contract": inverse_contract
}
leg_settings.append(leg_setting)
spread_setting = {
"name": spread.name,
"leg_settings": leg_settings,
"active_symbol": spread.active_leg.vt_symbol,
"min_volume": spread.min_volume
}
setting.append(spread_setting)
save_json(self.setting_filename, setting)
def register_event(self) -> None:
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_CONTRACT, self.process_contract_event)
def process_tick_event(self, event: Event) -> None:
""""""
tick = event.data
leg = self.legs.get(tick.vt_symbol, None)
if not leg:
return
leg.update_tick(tick)
for spread in self.symbol_spread_map[tick.vt_symbol]:
spread.calculate_price()
self.put_data_event(spread)
def process_position_event(self, event: Event) -> None:
""""""
position = event.data
leg = self.legs.get(position.vt_symbol, None)
if not leg:
return
leg.update_position(position)
for spread in self.symbol_spread_map[position.vt_symbol]:
spread.calculate_pos()
self.put_pos_event(spread)
def process_trade_event(self, event: Event) -> None:
""""""
trade = event.data
leg = self.legs.get(trade.vt_symbol, None)
if not leg:
return
leg.update_trade(trade)
for spread in self.symbol_spread_map[trade.vt_symbol]:
spread.calculate_pos()
self.put_pos_event(spread)
def process_contract_event(self, event: Event) -> None:
""""""
contract = event.data
leg = self.legs.get(contract.vt_symbol, None)
if leg:
# Update contract data
leg.update_contract(contract)
req = SubscribeRequest(
contract.symbol, contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
def put_data_event(self, spread: SpreadData) -> None:
""""""
event = Event(EVENT_SPREAD_DATA, spread)
self.event_engine.put(event)
def put_pos_event(self, spread: SpreadData) -> None:
""""""
event = Event(EVENT_SPREAD_POS, spread)
self.event_engine.put(event)
def get_leg(self, vt_symbol: str) -> LegData:
""""""
leg = self.legs.get(vt_symbol, None)
if not leg:
leg = LegData(vt_symbol)
self.legs[vt_symbol] = leg
# Subscribe market data
contract = self.main_engine.get_contract(vt_symbol)
if contract:
leg.update_contract(contract)
req = SubscribeRequest(
contract.symbol,
contract.exchange
)
self.main_engine.subscribe(req, contract.gateway_name)
# Initialize leg position
for direction in Direction:
vt_positionid = f"{vt_symbol}.{direction.value}"
position = self.main_engine.get_position(vt_positionid)
if position:
leg.update_position(position)
return leg
def add_spread(
self,
name: str,
leg_settings: List[Dict],
active_symbol: str,
min_volume: float,
save: bool = True
) -> None:
""""""
if name in self.spreads:
self.write_log("价差创建失败,名称重复:{}".format(name))
return
legs: List[LegData] = []
price_multipliers: Dict[str, int] = {}
trading_multipliers: Dict[str, int] = {}
inverse_contracts: Dict[str, bool] = {}
for leg_setting in leg_settings:
vt_symbol = leg_setting["vt_symbol"]
leg = self.get_leg(vt_symbol)
legs.append(leg)
price_multipliers[vt_symbol] = leg_setting["price_multiplier"]
trading_multipliers[vt_symbol] = leg_setting["trading_multiplier"]
inverse_contracts[vt_symbol] = leg_setting.get(
"inverse_contract", False)
spread = SpreadData(
name,
legs,
price_multipliers,
trading_multipliers,
active_symbol,
inverse_contracts,
min_volume
)
self.spreads[name] = spread
for leg in spread.legs.values():
self.symbol_spread_map[leg.vt_symbol].append(spread)
if save:
self.save_setting()
self.write_log("价差创建成功:{}".format(name))
self.put_data_event(spread)
def remove_spread(self, name: str) -> None:
""""""
if name not in self.spreads:
return
spread = self.spreads.pop(name)
for leg in spread.legs.values():
self.symbol_spread_map[leg.vt_symbol].remove(spread)
self.save_setting()
self.write_log("价差移除成功:{},重启后生效".format(name))
def get_spread(self, name: str) -> SpreadData:
""""""
spread = self.spreads.get(name, None)
return spread
def get_all_spreads(self) -> List[SpreadData]:
""""""
return list(self.spreads.values())
class SpreadAlgoEngine:
""""""
algo_class = SpreadTakerAlgo
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
        self.spreads: Dict[str, SpreadData] = {}
        self.algos: Dict[str, SpreadAlgoTemplate] = {}
        self.order_algo_map: Dict[str, SpreadAlgoTemplate] = {}
        self.symbol_algo_map: Dict[str, List[SpreadAlgoTemplate]] = defaultdict(list)
self.algo_count: int = 0
self.vt_tradeids: Set = set()
self.offset_converter: OffsetConverter = OffsetConverter(
self.main_engine
)
def start(self):
""""""
self.register_event()
self.write_log("价差算法引擎启动成功")
def stop(self):
""""""
for algo in self.algos.values():
self.stop_algo(algo)
def register_event(self):
""""""
self.event_engine.register(EVENT_TICK, self.process_tick_event)
self.event_engine.register(EVENT_ORDER, self.process_order_event)
self.event_engine.register(EVENT_TRADE, self.process_trade_event)
self.event_engine.register(EVENT_POSITION, self.process_position_event)
self.event_engine.register(EVENT_TIMER, self.process_timer_event)
self.event_engine.register(
EVENT_SPREAD_DATA, self.process_spread_event
)
def process_spread_event(self, event: Event):
""""""
spread: SpreadData = event.data
self.spreads[spread.name] = spread
def process_tick_event(self, event: Event):
""""""
tick = event.data
algos = self.symbol_algo_map[tick.vt_symbol]
if not algos:
return
buf = copy(algos)
for algo in buf:
if not algo.is_active():
algos.remove(algo)
else:
algo.update_tick(tick)
def process_order_event(self, event: Event):
""""""
order = event.data
self.offset_converter.update_order(order)
algo = self.order_algo_map.get(order.vt_orderid, None)
if algo and algo.is_active():
algo.update_order(order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
# Filter duplicate trade push
if trade.vt_tradeid in self.vt_tradeids:
return
self.vt_tradeids.add(trade.vt_tradeid)
self.offset_converter.update_trade(trade)
algo = self.order_algo_map.get(trade.vt_orderid, None)
if algo and algo.is_active():
algo.update_trade(trade)
def process_position_event(self, event: Event):
""""""
position = event.data
self.offset_converter.update_position(position)
def process_timer_event(self, event: Event):
""""""
buf = list(self.algos.values())
for algo in buf:
if not algo.is_active():
self.algos.pop(algo.algoid)
else:
algo.update_timer()
def start_algo(
self,
spread_name: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool
) -> str:
# Find spread object
spread = self.spreads.get(spread_name, None)
if not spread:
self.write_log("创建价差算法失败,找不到价差:{}".format(spread_name))
return ""
# Generate algoid str
self.algo_count += 1
algo_count_str = str(self.algo_count).rjust(6, "0")
algoid = f"{self.algo_class.algo_name}_{algo_count_str}"
# Create algo object
algo = self.algo_class(
self,
algoid,
spread,
direction,
offset,
price,
volume,
payup,
interval,
lock
)
self.algos[algoid] = algo
# Generate map between vt_symbol and algo
for leg in spread.legs.values():
self.symbol_algo_map[leg.vt_symbol].append(algo)
# Put event to update GUI
self.put_algo_event(algo)
return algoid
def stop_algo(
self,
algoid: str
):
""""""
algo = self.algos.get(algoid, None)
if not algo:
self.write_log("停止价差算法失败,找不到算法:{}".format(algoid))
return
algo.stop()
def put_algo_event(self, algo: SpreadAlgoTemplate) -> None:
""""""
event = Event(EVENT_SPREAD_ALGO, algo)
self.event_engine.put(event)
def write_algo_log(self, algo: SpreadAlgoTemplate, msg: str) -> None:
""""""
msg = f"{algo.algoid}:{msg}"
self.write_log(msg)
def send_order(
self,
algo: SpreadAlgoTemplate,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
lock: bool
) -> List[str]:
""""""
holding = self.offset_converter.get_position_holding(vt_symbol)
contract = self.main_engine.get_contract(vt_symbol)
if direction == Direction.LONG:
available = holding.short_pos - holding.short_pos_frozen
else:
available = holding.long_pos - holding.long_pos_frozen
# If no position to close, just open new
if not available:
offset = Offset.OPEN
        # If enough position to close, just close old
elif volume < available:
offset = Offset.CLOSE
# Otherwise, just close existing position
else:
volume = available
offset = Offset.CLOSE
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and algo.
self.order_algo_map[vt_orderid] = algo
return vt_orderids
def cancel_order(self, algo: SpreadAlgoTemplate, vt_orderid: str) -> None:
""""""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_algo_log(algo, "撤单失败,找不到委托{}".format(vt_orderid))
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def get_tick(self, vt_symbol: str) -> TickData:
""""""
return self.main_engine.get_tick(vt_symbol)
def get_contract(self, vt_symbol: str) -> ContractData:
""""""
return self.main_engine.get_contract(vt_symbol)
class SpreadStrategyEngine:
""""""
setting_filename = "spread_trading_strategy.json"
def __init__(self, spread_engine: SpreadEngine):
""""""
self.spread_engine: SpreadEngine = spread_engine
self.main_engine: MainEngine = spread_engine.main_engine
self.event_engine: EventEngine = spread_engine.event_engine
self.write_log = spread_engine.write_log
        self.strategy_setting: Dict[str, Dict] = {}
        self.classes: Dict[str, Type[SpreadStrategyTemplate]] = {}
        self.strategies: Dict[str, SpreadStrategyTemplate] = {}
        self.order_strategy_map: Dict[str, SpreadStrategyTemplate] = {}
        self.algo_strategy_map: Dict[str, SpreadStrategyTemplate] = {}
        self.spread_strategy_map: Dict[str, List[SpreadStrategyTemplate]] = defaultdict(
list)
self.vt_tradeids: Set = set()
self.load_strategy_class()
def start(self):
""""""
self.load_strategy_setting()
self.register_event()
self.write_log("价差策略引擎启动成功")
def close(self):
""""""
self.stop_all_strategies()
def load_strategy_class(self):
"""
Load strategy class from source code.
"""
path1 = Path(__file__).parent.joinpath("strategies")
self.load_strategy_class_from_folder(
path1, "vnpy.app.spread_trading.strategies")
path2 = Path.cwd().joinpath("strategies")
self.load_strategy_class_from_folder(path2, "strategies")
def load_strategy_class_from_folder(self, path: Path, module_name: str = ""):
"""
Load strategy class from certain folder.
"""
for dirpath, dirnames, filenames in os.walk(str(path)):
for filename in filenames:
if filename.split(".")[-1] in ("py", "pyd", "so"):
strategy_module_name = ".".join([module_name, filename.split(".")[0]])
self.load_strategy_class_from_module(strategy_module_name)
def load_strategy_class_from_module(self, module_name: str):
"""
Load strategy class from module file.
"""
try:
module = importlib.import_module(module_name)
for name in dir(module):
value = getattr(module, name)
if (isinstance(value, type) and issubclass(value, SpreadStrategyTemplate) and value is not SpreadStrategyTemplate):
self.classes[value.__name__] = value
except: # noqa
msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}"
self.write_log(msg)
def get_all_strategy_class_names(self):
""""""
return list(self.classes.keys())
def load_strategy_setting(self):
"""
Load setting file.
"""
self.strategy_setting = load_json(self.setting_filename)
for strategy_name, strategy_config in self.strategy_setting.items():
self.add_strategy(
strategy_config["class_name"],
strategy_name,
strategy_config["spread_name"],
strategy_config["setting"]
)
def update_strategy_setting(self, strategy_name: str, setting: dict):
"""
Update setting file.
"""
strategy = self.strategies[strategy_name]
self.strategy_setting[strategy_name] = {
"class_name": strategy.__class__.__name__,
"spread_name": strategy.spread_name,
"setting": setting,
}
save_json(self.setting_filename, self.strategy_setting)
def remove_strategy_setting(self, strategy_name: str):
"""
Update setting file.
"""
if strategy_name not in self.strategy_setting:
return
self.strategy_setting.pop(strategy_name)
save_json(self.setting_filename, self.strategy_setting)
def register_event(self):
""""""
ee = self.event_engine
ee.register(EVENT_ORDER, self.process_order_event)
ee.register(EVENT_TRADE, self.process_trade_event)
ee.register(EVENT_SPREAD_DATA, self.process_spread_data_event)
ee.register(EVENT_SPREAD_POS, self.process_spread_pos_event)
ee.register(EVENT_SPREAD_ALGO, self.process_spread_algo_event)
def process_spread_data_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_data)
def process_spread_pos_event(self, event: Event):
""""""
spread = event.data
strategies = self.spread_strategy_map[spread.name]
for strategy in strategies:
if strategy.inited:
self.call_strategy_func(strategy, strategy.on_spread_pos)
def process_spread_algo_event(self, event: Event):
""""""
algo = event.data
strategy = self.algo_strategy_map.get(algo.algoid, None)
if strategy:
self.call_strategy_func(
strategy, strategy.update_spread_algo, algo)
def process_order_event(self, event: Event):
""""""
order = event.data
strategy = self.order_strategy_map.get(order.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.update_order, order)
def process_trade_event(self, event: Event):
""""""
trade = event.data
strategy = self.order_strategy_map.get(trade.vt_orderid, None)
if strategy:
self.call_strategy_func(strategy, strategy.on_trade, trade)
def call_strategy_func(
self, strategy: SpreadStrategyTemplate, func: Callable, params: Any = None
):
"""
Call function of a strategy and catch any exception raised.
"""
try:
if params:
func(params)
else:
func()
except Exception:
strategy.trading = False
strategy.inited = False
msg = f"触发异常已停止\n{traceback.format_exc()}"
self.write_strategy_log(strategy, msg)
def add_strategy(
self, class_name: str, strategy_name: str, spread_name: str, setting: dict
):
"""
Add a new strategy.
"""
if strategy_name in self.strategies:
self.write_log(f"创建策略失败,存在重名{strategy_name}")
return
strategy_class = self.classes.get(class_name, None)
if not strategy_class:
self.write_log(f"创建策略失败,找不到策略类{class_name}")
return
spread = self.spread_engine.get_spread(spread_name)
if not spread:
self.write_log(f"创建策略失败,找不到价差{spread_name}")
return
strategy = strategy_class(self, strategy_name, spread, setting)
self.strategies[strategy_name] = strategy
# Add vt_symbol to strategy map.
strategies = self.spread_strategy_map[spread_name]
strategies.append(strategy)
# Update to setting file.
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def edit_strategy(self, strategy_name: str, setting: dict):
"""
Edit parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
strategy.update_setting(setting)
self.update_strategy_setting(strategy_name, setting)
self.put_strategy_event(strategy)
def remove_strategy(self, strategy_name: str):
"""
Remove a strategy.
"""
strategy = self.strategies[strategy_name]
if strategy.trading:
self.write_log(f"策略{strategy.strategy_name}移除失败,请先停止")
return
# Remove setting
self.remove_strategy_setting(strategy_name)
# Remove from symbol strategy map
strategies = self.spread_strategy_map[strategy.spread_name]
strategies.remove(strategy)
# Remove from strategies
self.strategies.pop(strategy_name)
return True
def init_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if strategy.inited:
self.write_log(f"{strategy_name}已经完成初始化,禁止重复操作")
return
self.call_strategy_func(strategy, strategy.on_init)
strategy.inited = True
self.put_strategy_event(strategy)
self.write_log(f"{strategy_name}初始化完成")
def start_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.inited:
self.write_log(f"策略{strategy.strategy_name}启动失败,请先初始化")
return
if strategy.trading:
self.write_log(f"{strategy_name}已经启动,请勿重复操作")
return
self.call_strategy_func(strategy, strategy.on_start)
strategy.trading = True
self.put_strategy_event(strategy)
def stop_strategy(self, strategy_name: str):
""""""
strategy = self.strategies[strategy_name]
if not strategy.trading:
return
self.call_strategy_func(strategy, strategy.on_stop)
strategy.stop_all_algos()
strategy.cancel_all_orders()
strategy.trading = False
self.put_strategy_event(strategy)
def init_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.init_strategy(strategy)
def start_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.start_strategy(strategy)
def stop_all_strategies(self):
""""""
for strategy in self.strategies.keys():
self.stop_strategy(strategy)
def get_strategy_class_parameters(self, class_name: str):
"""
Get default parameters of a strategy class.
"""
strategy_class = self.classes[class_name]
parameters = {}
for name in strategy_class.parameters:
parameters[name] = getattr(strategy_class, name)
return parameters
def get_strategy_parameters(self, strategy_name):
"""
Get parameters of a strategy.
"""
strategy = self.strategies[strategy_name]
return strategy.get_parameters()
def start_algo(
self,
strategy: SpreadStrategyTemplate,
spread_name: str,
direction: Direction,
offset: Offset,
price: float,
volume: float,
payup: int,
interval: int,
lock: bool
) -> str:
""""""
algoid = self.spread_engine.start_algo(
spread_name,
direction,
offset,
price,
volume,
payup,
interval,
lock
)
self.algo_strategy_map[algoid] = strategy
return algoid
def stop_algo(self, strategy: SpreadStrategyTemplate, algoid: str):
""""""
self.spread_engine.stop_algo(algoid)
def stop_all_algos(self, strategy: SpreadStrategyTemplate):
""""""
pass
def send_order(
self,
strategy: SpreadStrategyTemplate,
vt_symbol: str,
price: float,
volume: float,
direction: Direction,
offset: Offset,
lock: bool
) -> List[str]:
contract = self.main_engine.get_contract(vt_symbol)
original_req = OrderRequest(
symbol=contract.symbol,
exchange=contract.exchange,
direction=direction,
offset=offset,
type=OrderType.LIMIT,
price=price,
volume=volume
)
# Convert with offset converter
req_list = self.offset_converter.convert_order_request(
original_req, lock)
# Send Orders
vt_orderids = []
for req in req_list:
vt_orderid = self.main_engine.send_order(
req, contract.gateway_name)
# Check if sending order successful
if not vt_orderid:
continue
vt_orderids.append(vt_orderid)
self.offset_converter.update_order_request(req, vt_orderid)
# Save relationship between orderid and strategy.
self.order_strategy_map[vt_orderid] = strategy
return vt_orderids
def cancel_order(self, strategy: SpreadStrategyTemplate, vt_orderid: str):
""""""
order = self.main_engine.get_order(vt_orderid)
if not order:
self.write_strategy_log(
strategy, "撤单失败,找不到委托{}".format(vt_orderid))
return
req = order.create_cancel_request()
self.main_engine.cancel_order(req, order.gateway_name)
def cancel_all_orders(self, strategy: SpreadStrategyTemplate):
""""""
pass
def put_strategy_event(self, strategy: SpreadStrategyTemplate):
""""""
data = strategy.get_data()
event = Event(EVENT_SPREAD_STRATEGY, data)
self.event_engine.put(event)
def write_strategy_log(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
msg = f"{strategy.strategy_name}:{msg}"
self.write_log(msg)
def send_strategy_email(self, strategy: SpreadStrategyTemplate, msg: str):
""""""
if strategy:
subject = f"{strategy.strategy_name}"
else:
subject = "价差策略引擎"
self.main_engine.send_email(subject, msg)
def load_bar(
self, spread: SpreadData, days: int, interval: Interval, callback: Callable
):
""""""
end = datetime.now()
start = end - timedelta(days)
bars = load_bar_data(spread, interval, start, end)
for bar in bars:
callback(bar)
def load_tick(self, spread: SpreadData, days: int, callback: Callable):
""""""
end = datetime.now()
start = end - timedelta(days)
ticks = load_tick_data(spread, start, end)
for tick in ticks:
callback(tick)
| [
"[email protected]"
] | |
d9431f1fb2020f8d301376bed93ef53f3204cbf1 | 0c110eb32f2eaea5c65d40bda846ddc05757ced6 | /python_scripts/pimriscripts/mastersort/scripts_dir/p7432_run2M1.py | 39656c11ebf8cd9db049ce6d7b9a74d8b7e3f30a | [] | no_license | nyspisoccog/ks_scripts | 792148a288d1a9d808e397c1d2e93deda2580ff4 | 744b5a9dfa0f958062fc66e0331613faaaee5419 | refs/heads/master | 2021-01-18T14:22:25.291331 | 2018-10-15T13:08:24 | 2018-10-15T13:08:24 | 46,814,408 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,193 | py | from __future__ import with_statement
import os, csv, shutil,tarfile, uf, dcm_ops
dest_root = '/ifs/scratch/pimri/soccog/test_working'
dst_path_lst = ['7432', 'run2M1']
uf.buildtree(dest_root, dst_path_lst)
uf.copytree('/ifs/scratch/pimri/soccog/old/SocCog_Raw_Data_By_Exam_Number/2480/e1331017/s1388354_5610_2M1_s30', '/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
t = tarfile.open(os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1','MRDC_files.tar.gz'), 'r')
t.extractall('/ifs/scratch/pimri/soccog/test_working/7432/run2M1')
for f in os.listdir('/ifs/scratch/pimri/soccog/test_working/7432/run2M1'):
if 'MRDC' in f and 'gz' not in f:
old = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f)
new = os.path.join('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', f + '.dcm')
os.rename(old, new)
qsub_cnv_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cnv')
#qsub_cln_out = dcm_ops.cnv_dcm('/ifs/scratch/pimri/soccog/test_working/7432/run2M1', '7432_run2M1', '/ifs/scratch/pimri/soccog/scripts/mastersort/scripts_dir/cln')
| [
"[email protected]"
] | |
0203f8b7a170b9c90a9503a129644d67e720066b | de121a951947f70f402079d288a78d35c85747b2 | /exercises/exercises_04.py | 79cb7651e375b500210a4054a4ae7430a01afd4a | [] | no_license | tpurnachander/requests-workshop | 56899be6c5520fb947d91676c11864d09b4489d6 | dac134558f141c482e0a52f19fdce37b7e7ba928 | refs/heads/master | 2023-03-10T19:00:31.012280 | 2021-02-19T12:08:54 | 2021-02-19T12:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,359 | py | import requests
import xml.etree.ElementTree as et
# Exercise 4.1
# Create a function create_xml_body_from_string()
# that returns a docstring (with triple double quotes)
# containing the following XML document:
# <payee>
# <name>John Smith</name>
# <address>
# <street>My street</street>
# <city>My city</city>
# <state>My state</state>
# <zipCode>90210</zipCode>
# </address>
# <phoneNumber>0123456789</phoneNumber>
# <accountNumber>12345</accountNumber>
# </payee>
# Exercise 4.2
# Write a test that POSTs the object created in 4.1
# to http://parabank.parasoft.com/parabank/services/bank/billpay?accountId=12345&amount=500
# Set the request header 'Content-Type' to 'application/xml'
# Then check that the response status code is 200
# and that the value of the response header 'Content-Type' is also equal to 'application/xml'
# Exercise 4.3
# Write a method create_xml_body_using_elementtree() that returns
# the same request body as in Exercise 4.1, but now uses the
# ElementTree library (I've imported that for you already, it's available as 'et')
# Make your life a little easier by specifying all element values as strings
# Exercise 4.4
# Repeat Exercise 4.2, but now use the XML document created in Exercise 4.3
# Don't forget to convert the XML document to a string before sending it!
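# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original exercises): one possible
# way the functions above could be written. Treat it as a hint under the
# stated assumptions (the Parabank URL and header values are taken from
# the exercise text), not as the reference solution.
def create_xml_body_from_string():
    return """<payee>
    <name>John Smith</name>
    <address>
        <street>My street</street>
        <city>My city</city>
        <state>My state</state>
        <zipCode>90210</zipCode>
    </address>
    <phoneNumber>0123456789</phoneNumber>
    <accountNumber>12345</accountNumber>
</payee>"""


def create_xml_body_using_elementtree():
    payee = et.Element("payee")
    et.SubElement(payee, "name").text = "John Smith"
    address = et.SubElement(payee, "address")
    et.SubElement(address, "street").text = "My street"
    et.SubElement(address, "city").text = "My city"
    et.SubElement(address, "state").text = "My state"
    et.SubElement(address, "zipCode").text = "90210"
    et.SubElement(payee, "phoneNumber").text = "0123456789"
    et.SubElement(payee, "accountNumber").text = "12345"
    return payee


def test_post_xml_body_sketch():
    url = "http://parabank.parasoft.com/parabank/services/bank/billpay?accountId=12345&amount=500"
    response = requests.post(
        url,
        data=et.tostring(create_xml_body_using_elementtree()),
        headers={"Content-Type": "application/xml"},
    )
    assert response.status_code == 200
    assert response.headers["Content-Type"] == "application/xml"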
| [
"[email protected]"
] | |
a5e2debc3b4de63242c2bc5f62e4db0ae3a58645 | 44f07b81df56d7ea44775784a9697648fe481478 | /day8/faceapp/facedetect.py | ab3e244e889618a394e6791b7b7b4edf81d25532 | [] | no_license | shaadomanthra/cbpython-advanced | 436510c70deca4e1ef01517f87bba0e392583a88 | 86b613f89ca0b0cd8b243c157af1a2807e6ce605 | refs/heads/master | 2022-11-30T23:33:45.938854 | 2020-08-12T11:20:03 | 2020-08-12T11:20:03 | 276,316,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | ## detect face and draw rectangles
# import packages (pip install opencv-python)
from cv2 import cv2
import sys
# path for image and cascade
imagePath = 'images/f1.jpg'
cascPath = "haarcascade_frontalface_default.xml"
# Create the haar cascade
faceCascade = cv2.CascadeClassifier(cascPath)
# Read the image & convert to gray scale
image = cv2.imread(imagePath)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# Detect faces in the image
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
print(faces)
# # Draw a rectangle around the faces
for (x, y, w, h) in faces:
cv2.rectangle(image, (x, y), (x+w, y+h), (0, 255, 0), 2)
# #
# # # # open the image window to display
cv2.imshow("Faces found", image)
cv2.waitKey(0)
# Saving the image
# cv2.imwrite(saveimagePath, image)
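# Illustrative completion (not part of the original script): the commented-out
# save call above references saveimagePath, which is never defined in this
# file; the path below is an assumed example value.
# saveimagePath = 'images/f1_faces.jpg'
# cv2.imwrite(saveimagePath, image)
# cv2.destroyAllWindows()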
| [
"[email protected]"
] | |
001b8e5d7167d9f7ae30d9510713bbc363cc653b | da934e0010380fdc6894063540f61b0ebc2c9ded | /nova/crypto.py | 1f35ffa3915dad74a002a55998c536549c4b8d2d | [
"Apache-2.0"
] | permissive | bopopescu/cc-2 | ed4f1dfe3c98f476ff619058d99855a16272d36b | 37444fb16b36743c439b0d6c3cac2347e0cc0a94 | refs/heads/master | 2022-11-23T03:57:12.255817 | 2014-10-02T06:10:46 | 2014-10-02T06:10:46 | 282,512,589 | 0 | 0 | Apache-2.0 | 2020-07-25T19:36:05 | 2020-07-25T19:36:05 | null | UTF-8 | Python | false | false | 7,863 | py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright [2010] [Anso Labs, LLC]
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Wrappers around standard crypto, including root and intermediate CAs,
SSH keypairs and x509 certificates.
"""
import hashlib
import logging
import os
import shutil
import tempfile
import time
import utils
from nova import vendor
import M2Crypto
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
flags.DEFINE_string('ca_file', 'cacert.pem', 'Filename of root CA')
flags.DEFINE_string('keys_path', utils.abspath('../keys'), 'Where we keep our keys')
flags.DEFINE_string('ca_path', utils.abspath('../CA'), 'Where we keep our root CA')
flags.DEFINE_boolean('use_intermediate_ca', False, 'Should we use intermediate CAs for each project?')
def ca_path(project_id):
if project_id:
return "%s/INTER/%s/cacert.pem" % (FLAGS.ca_path, project_id)
return "%s/cacert.pem" % (FLAGS.ca_path)
def fetch_ca(project_id=None, chain=True):
if not FLAGS.use_intermediate_ca:
project_id = None
buffer = ""
if project_id:
with open(ca_path(project_id),"r") as cafile:
buffer += cafile.read()
if not chain:
return buffer
with open(ca_path(None),"r") as cafile:
buffer += cafile.read()
return buffer
def generate_key_pair(bits=1024):
# what is the magic 65537?
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
(out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
shutil.rmtree(tmpdir)
# code below returns public key in pem format
# key = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
# private_key = key.as_pem(cipher=None)
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
# public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
return (private_key, public_key, fingerprint)
def ssl_pub_to_ssh_pub(ssl_public_key, name='root', suffix='nova'):
"""requires lsh-utils"""
convert="sed -e'1d' -e'$d' | pkcs1-conv --public-key-info --base-64 |" \
+ " sexp-conv | sed -e'1s/(rsa-pkcs1/(rsa-pkcs1-sha1/' | sexp-conv -s" \
+ " transport | lsh-export-key --openssh"
(out, err) = utils.execute(convert, ssl_public_key)
if err:
raise exception.Error("Failed to generate key: %s", err)
return '%s %s@%s\n' %(out.strip(), name, suffix)
def generate_x509_cert(subject="/C=US/ST=California/L=The Mission/O=CloudFed/OU=NOVA/CN=foo", bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
logging.debug("openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating private key: %s", "openssl genrsa -out %s %s" % (keyfile, bits))
utils.runthis("Generating CSR: %s", "openssl req -new -key %s -out %s -batch -subj %s" % (keyfile, csrfile, subject))
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
return (private_key, csr)
def sign_csr(csr_text, intermediate=None):
if not FLAGS.use_intermediate_ca:
intermediate = None
if not intermediate:
return _sign_csr(csr_text, FLAGS.ca_path)
user_ca = "%s/INTER/%s" % (FLAGS.ca_path, intermediate)
if not os.path.exists(user_ca):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
utils.runthis("Generating intermediate CA: %s", "sh geninter.sh %s" % (intermediate))
os.chdir(start)
return _sign_csr(csr_text, user_ca)
def _sign_csr(csr_text, ca_folder):
tmpfolder = tempfile.mkdtemp()
csrfile = open("%s/inbound.csr" % (tmpfolder), "w")
csrfile.write(csr_text)
csrfile.close()
logging.debug("Flags path: %s" % ca_folder)
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
utils.runthis("Signing cert: %s", "openssl ca -batch -out %s/outbound.crt -config ./openssl.cnf -infiles %s/inbound.csr" % (tmpfolder, tmpfolder))
os.chdir(start)
with open("%s/outbound.crt" % (tmpfolder), "r") as crtfile:
return crtfile.read()
def mkreq(bits, subject="foo", ca=0):
pk = M2Crypto.EVP.PKey()
req = M2Crypto.X509.Request()
rsa = M2Crypto.RSA.gen_key(bits, 65537, callback=lambda: None)
pk.assign_rsa(rsa)
rsa = None # should not be freed here
req.set_pubkey(pk)
req.set_subject(subject)
req.sign(pk,'sha512')
assert req.verify(pk)
pk2 = req.get_pubkey()
assert req.verify(pk2)
return req, pk
def mkcacert(subject='nova', years=1):
req, pk = mkreq(2048, subject, ca=1)
pkey = req.get_pubkey()
sub = req.get_subject()
cert = M2Crypto.X509.X509()
cert.set_serial_number(1)
cert.set_version(2)
cert.set_subject(sub) # FIXME subject is not set in mkreq yet
t = long(time.time()) + time.timezone
now = M2Crypto.ASN1.ASN1_UTCTIME()
now.set_time(t)
nowPlusYear = M2Crypto.ASN1.ASN1_UTCTIME()
nowPlusYear.set_time(t + (years * 60 * 60 * 24 * 365))
cert.set_not_before(now)
cert.set_not_after(nowPlusYear)
issuer = M2Crypto.X509.X509_Name()
issuer.C = "US"
issuer.CN = subject
cert.set_issuer(issuer)
cert.set_pubkey(pkey)
ext = M2Crypto.X509.new_extension('basicConstraints', 'CA:TRUE')
cert.add_ext(ext)
cert.sign(pk, 'sha512')
# print 'cert', dir(cert)
print cert.as_pem()
print pk.get_rsa().as_pem()
return cert, pk, pkey
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
# http://code.google.com/p/boto
def compute_md5(fp):
"""
@type fp: file
@param fp: File pointer to the file to MD5 hash. The file pointer will be
reset to the beginning of the file before the method returns.
@rtype: tuple
@return: the hex digest version of the MD5 hash
"""
m = hashlib.md5()
fp.seek(0)
s = fp.read(8192)
while s:
m.update(s)
s = fp.read(8192)
hex_md5 = m.hexdigest()
# size = fp.tell()
fp.seek(0)
return hex_md5
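# Illustrative usage sketch (not part of the original module): compute_md5
# expects an open, seekable file object; the path below is an assumed
# example value (Python 2, matching the module).
#
#   with open("/tmp/example.img", "rb") as fp:
#       print compute_md5(fp)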
| [
"[email protected]"
] | |
ad21dddcaff52dd22e77f283ff4e11ab18a76100 | b8d0b260960e1c43b883049d68c15a7183df200b | /5_py_blog/blog_app/tests.py | ebafc4198267b4929abd66e68f76098e08839139 | [] | no_license | JAreina/python-django | 59ac92d0694522c1d096bed636409d9405c5caba | 66c7c301dec448217df6516198723e1ce987eab7 | refs/heads/master | 2020-03-27T18:34:59.821701 | 2018-09-07T07:49:35 | 2018-09-07T07:49:35 | 146,931,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,739 | py |
# Create your tests here.
from django.contrib.auth import get_user_model
from django.test import Client, TestCase
from django.urls import reverse
from .models import Post
class BlogTests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create_user(
username='testuser',
email='[email protected]',
password='xxxxxx'
)
self.post = Post.objects.create(
titulo='A good titulo',
texto='Nice texto content',
autor=self.user,
)
def test_string_representation(self):
post = Post(titulo='A sample titulo')
self.assertEqual(str(post), post.titulo)
def test_post_content(self):
self.assertEqual(f'{self.post.titulo}', 'A good titulo')
self.assertEqual(f'{self.post.autor}', 'testuser')
self.assertEqual(f'{self.post.texto}', 'Nice texto content')
def test_post_list_view(self):
response = self.client.get(reverse('home'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Nice texto content')
self.assertTemplateUsed(response, 'home.html')
def test_post_detail_view(self):
response = self.client.get('/post/1/')
no_response = self.client.get('/post/100000/')
self.assertEqual(response.status_code, 200)
self.assertEqual(no_response.status_code, 404)
self.assertContains(response, 'A good titulo')
self.assertTemplateUsed(response, 'post_detalle.html')
| [
"[email protected]"
] | |
a174ca449539006233ff7a4acea1252aef8eb3eb | 0ab90ab559eab46b583b4b1fdd4a5bb3f55b7793 | /python/ray/experimental/workflow/common.py | 3c40c555e0eab6747e2da0c8fe41e1c1b84e7018 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | swag1ong/ray | b22cd5ebab96c30f15b00a7d044fdeb7543a4616 | fdbeef604692aa308973988b32405ec0d70f9f40 | refs/heads/master | 2023-06-25T21:55:44.398516 | 2021-07-26T00:39:24 | 2021-07-26T00:39:24 | 389,518,857 | 2 | 0 | Apache-2.0 | 2021-07-26T05:33:40 | 2021-07-26T05:33:39 | null | UTF-8 | Python | false | false | 7,714 | py | from enum import Enum, unique
from collections import deque
import re
from typing import Dict, List, Optional, Callable, Set, Iterator, Any
import unicodedata
import uuid
from dataclasses import dataclass
import ray
from ray import ObjectRef
# Alias types
StepID = str
WorkflowOutputType = ObjectRef
@unique
class WorkflowStatus(str, Enum):
# There is at least a remote task running in ray cluster
RUNNING = "RUNNING"
# It got canceled and can't be resumed later.
CANCELED = "CANCELED"
# The workflow runs successfully.
SUCCESSFUL = "SUCCESSFUL"
    # The workflow failed with an application error.
# It can be resumed.
FAILED = "FAILED"
# The workflow failed with a system error, i.e., ray shutdown.
# It can be resumed.
RESUMABLE = "RESUMABLE"
@dataclass
class WorkflowInputs:
# The object ref of the input arguments.
args: ObjectRef
# The object refs in the arguments.
object_refs: List[ObjectRef]
# TODO(suquark): maybe later we can replace it with WorkflowData.
# The workflows in the arguments.
workflows: "List[Workflow]"
@dataclass
class WorkflowData:
# The workflow step function body.
func_body: Callable
# The arguments of a workflow.
inputs: WorkflowInputs
    # The number of retries on application exceptions
max_retries: int
    # Whether the user wants to handle the exception manually
catch_exceptions: bool
# ray_remote options
ray_options: Dict[str, Any]
def to_metadata(self) -> Dict[str, Any]:
f = self.func_body
return {
"name": f.__module__ + "." + f.__qualname__,
"object_refs": [r.hex() for r in self.inputs.object_refs],
"workflows": [w.id for w in self.inputs.workflows],
"max_retries": self.max_retries,
"catch_exceptions": self.catch_exceptions,
"ray_options": self.ray_options,
}
@dataclass
class WorkflowMetaData:
# The current status of the workflow
status: WorkflowStatus
def slugify(value: str, allow_unicode=False) -> str:
"""Adopted from
https://github.com/django/django/blob/master/django/utils/text.py
Convert to ASCII if 'allow_unicode' is False. Convert spaces or repeated
dashes to single dashes. Remove characters that aren't alphanumerics,
underscores, dots or hyphens. Also strip leading and
trailing whitespace.
"""
if allow_unicode:
value = unicodedata.normalize("NFKC", value)
else:
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = re.sub(r"[^\w.\-]", "", value).strip()
return re.sub(r"[-\s]+", "-", value)
class Workflow:
def __init__(self, workflow_data: WorkflowData):
if workflow_data.ray_options.get("num_returns", 1) > 1:
raise ValueError("Workflow should have one return value.")
self._data = workflow_data
self._executed: bool = False
self._output: Optional[WorkflowOutputType] = None
self._step_id: StepID = slugify(
self._data.func_body.__qualname__) + "." + uuid.uuid4().hex
@property
def executed(self) -> bool:
return self._executed
@property
def output(self) -> WorkflowOutputType:
if not self._executed:
raise Exception("The workflow has not been executed.")
return self._output
@property
def id(self) -> StepID:
return self._step_id
def execute(self,
outer_most_step_id: Optional[StepID] = None,
last_step_of_workflow: bool = False) -> ObjectRef:
"""Trigger workflow execution recursively.
Args:
outer_most_step_id: See
"step_executor.execute_workflow" for explanation.
last_step_of_workflow: The step that generates the output of the
workflow (including nested steps).
"""
if self.executed:
return self._output
from ray.experimental.workflow import step_executor
output = step_executor.execute_workflow_step(self._step_id, self._data,
outer_most_step_id,
last_step_of_workflow)
if not isinstance(output, WorkflowOutputType):
raise TypeError("Unexpected return type of the workflow.")
self._output = output
self._executed = True
return output
def iter_workflows_in_dag(self) -> Iterator["Workflow"]:
"""Collect all workflows in the DAG linked to the workflow
using BFS."""
# deque is used instead of queue.Queue because queue.Queue is aimed
# at multi-threading. We just need a pure data structure here.
visited_workflows: Set[Workflow] = {self}
q = deque([self])
        while q:  # deque's pythonic way to check emptiness
w: Workflow = q.popleft()
for p in w._data.inputs.workflows:
if p not in visited_workflows:
visited_workflows.add(p)
q.append(p)
yield w
@property
def data(self) -> WorkflowData:
"""Get the workflow data."""
return self._data
def __reduce__(self):
raise ValueError(
"Workflow is not supposed to be serialized by pickle. "
"Maybe you are passing it to a Ray remote function, "
"returning it from a Ray remote function, or using "
"'ray.put()' with it?")
def run(self, workflow_id: Optional[str] = None) -> Any:
"""Run a workflow.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = trip.run()
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
return ray.get(self.run_async(workflow_id))
def run_async(self, workflow_id: Optional[str] = None) -> ObjectRef:
"""Run a workflow asynchronously.
Examples:
>>> @workflow.step
... def book_flight(origin: str, dest: str) -> Flight:
... return Flight(...)
>>> @workflow.step
... def book_hotel(location: str) -> Reservation:
... return Reservation(...)
>>> @workflow.step
... def finalize_trip(bookings: List[Any]) -> Trip:
... return Trip(...)
>>> flight1 = book_flight.step("OAK", "SAN")
>>> flight2 = book_flight.step("SAN", "OAK")
>>> hotel = book_hotel.step("SAN")
>>> trip = finalize_trip.step([flight1, flight2, hotel])
>>> result = ray.get(trip.run_async())
Args:
workflow_id: A unique identifier that can be used to resume the
workflow. If not specified, a random id will be generated.
"""
# TODO(suquark): avoid cyclic importing
from ray.experimental.workflow.execution import run
return run(self, workflow_id)
| [
"[email protected]"
] | |
9a71057ca86eb6931927a2afbb8ea436b8c68c37 | afd44f9bf1469418ae4709f48f2c3c188b45eb73 | /preprocessing/text_processor.py | 88a513a6b2da416865452ab9af1cab27c4987d68 | [] | no_license | zerebom/pytoolkit | 2ed359ec0ef612461dec24b57e746f99f212d540 | 078a2fa786a755d6fe0ee69dd8caecec833fb2fa | refs/heads/master | 2020-06-29T06:20:11.069967 | 2019-09-18T01:59:14 | 2019-09-18T01:59:14 | 200,461,675 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,065 | py | import urllib.request, urllib.error
import re
import MeCab
import mojimoji
from sklearn.feature_extraction.text import TfidfVectorizer
def get_stopword()->list:
slothlib_path = 'http://svn.sourceforge.jp/svnroot/slothlib/CSharp/Version1/SlothLib/NLP/Filter/StopWord/word/Japanese.txt'
slothlib_file = urllib.request.urlopen(url=slothlib_path)
slothlib_stopwords = [line.decode("utf-8").strip() for line in slothlib_file]
slothlib_stopwords = [ss for ss in slothlib_stopwords if not ss==u'']
eng_stop=["a's" , "able" , "about" , "above" , "according" , "accordingly" , "across" , "actually" , "after" , "afterwards" , "again" , "against" , "ain't" , "all" , "allow" , "allows" , "almost" , "alone" , "along" , "already" , "also" , "although" , "always" , "am" , "among" , "amongst" , "an" , "and" , "another" , "any" , "anybody" , "anyhow" , "anyone" , "anything" , "anyway" , "anyways" , "anywhere" , "apart" , "appear" , "appreciate" , "appropriate" , "are" , "aren't" , "around" , "as" , "aside" , "ask" , "asking" , "associated" , "at" , "available" , "away" , "awfully" , "be" , "became" , "because" , "become" , "becomes" , "becoming" , "been" , "before" , "beforehand" , "behind" , "being" , "believe" , "below" , "beside" , "besides" , "best" , "better" , "between" , "beyond" , "both" , "brief" , "but" , "by" , "c'mon" , "c's" , "came" , "can" , "can't" , "cannot" , "cant" , "cause" , "causes" , "certain" , "certainly" , "changes" , "clearly" , "co" , "com" , "come" , "comes" , "concerning" , "consequently" , "consider" , "considering" , "contain" , "containing" , "contains" , "corresponding" , "could" , "couldn't" , "course" , "currently" , "definitely" , "described" , "despite" , "did" , "didn't" , "different" , "do" , "does" , "doesn't" , "doing" , "don't" , "done" , "down" , "downwards" , "during" , "each" , "edu" , "eg" , "eight" , "either" , "else" , "elsewhere" , "enough" , "entirely" , "especially" , "et" , "etc" , "even" , "ever" , "every" , "everybody" , "everyone" , "everything" , "everywhere" , "ex" , "exactly" , "example" , "except" , "far" , "few" , "fifth" , "first" , "five" , "followed" , "following" , "follows" , "for" , "former" , "formerly" , "forth" , "four" , "from" , "further" , "furthermore" , "get" , "gets" , "getting" , "given" , "gives" , "go" , "goes" , "going" , "gone" , "got" , "gotten" , "greetings" , "had" , "hadn't" , "happens" , "hardly" , "has" , "hasn't" , "have" , "haven't" , "having" , "he" , "he's" , "hello" , "help" , "hence" , "her" , "here" , "here's" , "hereafter" , "hereby" , "herein" , "hereupon" , "hers" , "herself" , "hi" , "him" , "himself" , "his" , "hither" , "hopefully" , "how" , "howbeit" , "however" , "i'd" , "i'll" , "i'm" , "i've" , "ie" , "if" , "ignored" , "immediate" , "in" , "inasmuch" , "inc" , "indeed" , "indicate" , "indicated" , "indicates" , "inner" , "insofar" , "instead" , "into" , "inward" , "is" , "isn't" , "it" , "it'd" , "it'll" , "it's" , "its" , "itself" , "just" , "keep" , "keeps" , "kept" , "know" , "known" , "knows" , "last" , "lately" , "later" , "latter" , "latterly" , "least" , "less" , "lest" , "let" , "let's" , "like" , "liked" , "likely" , "little" , "look" , "looking" , "looks" , "ltd" , "mainly" , "many" , "may" , "maybe" , "me" , "mean" , "meanwhile" , "merely" , "might" , "more" , "moreover" , "most" , "mostly" , "much" , "must" , "my" , "myself" , "name" , "namely" , "nd" , "near" , "nearly" , "necessary" , "need" , "needs" , "neither" , "never" , "nevertheless" , "new" , "next" , "nine" , "no" , "nobody" , "non" , "none" , "noone" , "nor" , "normally" , "not" , "nothing" , "novel" , "now" , "nowhere" , "obviously" , "of" , "off" , "often" , "oh" , "ok" , "okay" , "old" , "on" , "once" , "one" , "ones" , "only" , "onto" , "or" , "other" , "others" , "otherwise" , "ought" , "our" , "ours" , "ourselves" , "out" , "outside" , "over" , "overall" , "own" , "particular" , "particularly" , "per" , "perhaps" , "placed" , "please" , "plus" , "possible" , "presumably" , "probably" , "provides" , "que" , 
"quite" , "qv" , "rather" , "rd" , "re" , "really" , "reasonably" , "regarding" , "regardless" , "regards" , "relatively" , "respectively" , "right" , "said" , "same" , "saw" , "say" , "saying" , "says" , "second" , "secondly" , "see" , "seeing" , "seem" , "seemed" , "seeming" , "seems" , "seen" , "self" , "selves" , "sensible" , "sent" , "serious" , "seriously" , "seven" , "several" , "shall" , "she" , "should" , "shouldn't" , "since" , "six" , "so" , "some" , "somebody" , "somehow" , "someone" , "something" , "sometime" , "sometimes" , "somewhat" , "somewhere" , "soon" , "sorry" , "specified" , "specify" , "specifying" , "still" , "sub" , "such" , "sup" , "sure" , "t's" , "take" , "taken" , "tell" , "tends" , "th" , "than" , "thank" , "thanks" , "thanx" , "that" , "that's" , "thats" , "the" , "their" , "theirs" , "them" , "themselves" , "then" , "thence" , "there" , "there's" , "thereafter" , "thereby" , "therefore" , "therein" , "theres" , "thereupon" , "these" , "they" , "they'd" , "they'll" , "they're" , "they've" , "think" , "third" , "this" , "thorough" , "thoroughly" , "those" , "though" , "three" , "through" , "throughout" , "thru" , "thus" , "to" , "together" , "too" , "took" , "toward" , "towards" , "tried" , "tries" , "truly" , "try" , "trying" , "twice" , "two" , "un" , "under" , "unfortunately" , "unless" , "unlikely" , "until" , "unto" , "up" , "upon" , "us" , "use" , "used" , "useful" , "uses" , "using" , "usually" , "value" , "various" , "very" , "via" , "viz" , "vs" , "want" , "wants" , "was" , "wasn't" , "way" , "we" , "we'd" , "we'll" , "we're" , "we've" , "welcome" , "well" , "went" , "were" , "weren't" , "what" , "what's" , "whatever" , "when" , "whence" , "whenever" , "where" , "where's" , "whereafter" , "whereas" , "whereby" , "wherein" , "whereupon" , "wherever" , "whether" , "which" , "while" , "whither" , "who" , "who's" , "whoever" , "whole" , "whom" , "whose" , "why" , "will" , "willing" , "wish" , "with" , "within" , "without" , "won't" , "wonder" , "would" , "wouldn't" , "yes" , "yet" , "you" , "you'd" , "you'll" , "you're" , "you've" , "your" , "yours" , "yourself" , "yourselves" , "zero"]
sw=slothlib_stopwords+eng_stop
return sw
tagger = MeCab.Tagger('-d /usr/local/lib/mecab/dic/mecab-ipadic-neologd')
# Stopword list used by parse_text below; without this assignment `sw` would be
# undefined when parse_text is called.
sw = get_stopword()
def normalize_number(text):
# Replace each run of consecutive digits with a same-length run of zeros,
# capped at four zeros
replaced_text = re.sub(r'[0-9]+', lambda m: '0' * min(len(m.group()), 4), text)
return replaced_text
def delete_number(text):
# Remove runs of consecutive digits
replaced_text = re.sub(r'[0-9]+', '', text)
return replaced_text
# Function that tokenizes the input text into words and returns them joined by spaces
def parse_text(text, min_word_len=1):
words = []
try:
tagger.parse(text).rstrip().split("\n")[:-1]
except:
return ""
for morph in tagger.parse(text).rstrip().split("\n")[:-1]:
# surface form
# word=morph.split("\t")[0]
# base (dictionary) form
word = morph.split(",")[-3]
word_cls = morph.split("\t")[1].split(",")[0]
word = mojimoji.zen_to_han(word, kana=False).lower()
if not word in sw:
if len(word) > min_word_len:
# screening by part of speech
# if word_cls in ['名詞']:  # '名詞' = noun
words.append(delete_number(word))
return " ".join(words)
def tokenize(s):
return re.split('[ !"#$%&\'(+)*,-./:;<=>?@\\\[\]^_`{|}~“”¨«»®´·º½¾¿¡§£₤‘’。、]', s)
def get_len(text):
"""df[col]=df[col].apply(get_len)"""
num = len(text) if type(text) == str else 0
return num
tfidf_vectorizer = TfidfVectorizer(max_df=0.5, min_df=1,
max_features=10000, norm='l2',
tokenizer=tokenize, ngram_range=(1, 2))
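if __name__ == "__main__":
    # Minimal usage sketch (illustrative; assumes MeCab plus the neologd
    # dictionary configured above are installed and the stopword URL is
    # reachable). The column name "body" is an assumption for the example.
    df = pd.DataFrame({"body": ["今日は良い天気ですね", "新しい化粧水を2本買いました"]})
    df["tokens"] = df["body"].apply(parse_text)            # space-joined base forms
    matrix = tfidf_vectorizer.fit_transform(df["tokens"])  # sparse (n_docs, n_terms)
    print(matrix.shape)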
| [
"[email protected]"
] | |
63d50f46e6763c50b438c35733b409c516416606 | 33cff13b90fdd628560baef8b3f6d68ceaad912c | /tests/test_commands/test_package_downloads.py | e4b7b094ed22878a396f1c1e911369fd769b9165 | [
"MIT"
] | permissive | rosdyana/dephell | 3139140d6f16288177705020a625897f91f2514b | 993a212ce17dda04a878ceac64854d809f3dc47b | refs/heads/master | 2020-08-06T09:38:21.150070 | 2019-09-27T16:58:23 | 2019-09-27T16:58:23 | 212,927,181 | 0 | 0 | MIT | 2019-10-05T01:22:23 | 2019-10-05T01:22:23 | null | UTF-8 | Python | false | false | 708 | py | # built-in
import json
# external
import pytest
# project
from dephell.commands import PackageDownloadsCommand
from dephell.config import Config
@pytest.mark.skipif(True, reason='disable while pypistat is down')
@pytest.mark.allow_hosts()
def test_package_downloads_command(capsys):
config = Config()
config.attach({
'level': 'WARNING',
'silent': True,
})
command = PackageDownloadsCommand(argv=['DJANGO'], config=config)
result = command()
captured = capsys.readouterr()
output = json.loads(captured.out)
assert result is True
assert len(output['pythons']) > 4
assert len(output['systems']) > 2
assert '█' in output['pythons'][0]['chart']
| [
"[email protected]"
] | |
1a57dcb6dd5bc694a8c241ff875abb2a00b8f021 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Prime Services/FPythonCode/PaymentFees.py | d79409eb38743fa11ab65e6b6c2c6f2b1438516b | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,474 | py | """-----------------------------------------------------------------------
MODULE
PaymentFees
DESCRIPTION
Date : 2012-09-19
Purpose : Returns the payment fees of a trade
Department and Desk : Prime Services
Requester : Danilo Mantoan
Developer : Nidheesh Sharma
CR Number : 556348
ENDDESCRIPTION
HISTORY
Date: CR Number: Developer: Description:
2013-03-19 C885651 Nidheesh Sharma Excluded INS, SET, Brokerage fees from OtherFee
2014-03-12 C1819376 Hynek Urban Refactor & minor bug fix of other fees.
2018-11-22 1001164411 Ondrej Bahounek ABITFA-5622: Convert Other Fees to trade currency.
2018-11-28 Jaco Swanepoel Payment migration: convert cash payments to appropriate new additional payment types.
-----------------------------------------------------------------------"""
import acm
FX_COLUMN_ID = 'FX Rate On Display Curr'
CS = acm.Calculations().CreateCalculationSpace(acm.GetDefaultContext(), 'FPortfolioSheet')
ZAR_CUR = acm.FCurrency['ZAR']
PAYMENT_TYPES_TO_EXCLUDE = ('Premium',
'Dividend Suppression',
'INS',
'SET',
'Brokerage Vatable',
'Execution Fee',
'Aggregated Settled',
'Aggregated Accrued',
'Aggregated Funding',
'Aggregated Dividends',
'Aggregated Depreciation',
'Aggregated Future Settle',
'Aggregated Forward Funding PL',
'Aggregated Cash Open Value',
'Aggregated Cash Position',
'Aggregated Forward Premium',
'Aggregated Forward Settled',
'Aggregated Forward Dividends',
'Aggregated Forward Position')
PAYMENT_TEXTS_TO_EXCLUDE = ('Execution', 'ExecutionFee', 'INS', 'SET', 'Brokerage')
def ReturnOtherFee(trade, val_date):
"""
Return the sum of all fees of a trade up to the specified date.
Fees of type Execution Fee, INS, SET and Brokerage and any payments of type
Aggregated Settled are excluded.
"""
CS.SimulateGlobalValue('Valuation Date', val_date)
CS.SimulateGlobalValue('Portfolio Profit Loss End Date', 'Custom Date')
CS.SimulateGlobalValue('Portfolio Profit Loss End Date Custom', val_date)
sumOfOtherFees = 0
if trade.Status() not in ('Void',):
payments = trade.Payments()
for payment in payments:
if payment.Type() in PAYMENT_TYPES_TO_EXCLUDE or\
payment.Text() in PAYMENT_TEXTS_TO_EXCLUDE:
continue
if payment.ValidFrom() > val_date:
continue
amount = payment.Amount()
if ZAR_CUR.Name() != payment.Currency().Name():
# Ondrej's note:
# Convert all non-ZAR payments to ZAR.
# This should be ideally converted to trade currency,
# but then many other attributes need to be changed and well tested.
# This is just a fix to accommodate Futs on FXs by the end of the month.
CS.SimulateValue(ZAR_CUR, "Portfolio Currency", payment.Currency())
fx_rate = CS.CreateCalculation(ZAR_CUR, FX_COLUMN_ID).Value().Number()
amount *= fx_rate
sumOfOtherFees += amount
return acm.DenominatedValue(sumOfOtherFees, ZAR_CUR.Name(), None, val_date)
#Function to return termination fee of a trade
def ReturnTerminationFee(trade):
terminationFee = 0
if trade.Status() in ('Terminated',):
payments = trade.Payments()
for payment in payments:
if payment.Type() in ('Cash',) and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
terminationFee = terminationFee + payment.Amount()
elif payment.Type() in ('Termination Fee',):
terminationFee = terminationFee + payment.Amount()
return terminationFee
#Function to return termination fee date of a trade
def ReturnTerminationFeeDate(trade):
terminationDate = ''
if trade.Status() in ('Terminated',):
payments = trade.Payments()
for payment in payments:
if payment.Type() in ('Cash',) and ('Termination' in payment.Text() or 'Terminated' in payment.Text()):
terminationDate = payment.PayDay()
elif payment.Type() in ('Termination Fee',):
terminationDate = payment.PayDay()
return terminationDate
#Function to return termination fee date of a trade in the correct format from an array of dates
def ReturnSingleTerminationFeeDate(arrayOfDates):
terminationDate = ''
for date in arrayOfDates:
if date != '' and isinstance(date, str):
dateFormatter = acm.FDateFormatter('dateFormatter')
dateFormatter.FormatDefinition("%d/%m/%Y")
terminationDate = dateFormatter.Format(date)#.replace('-','/')
break
return terminationDate
| [
"[email protected]"
] | |
5bc96ed5b2ff7057cfe5cf0f85b1852e0b311584 | afa0d5a97925273f7fb0befef697d36020df5787 | /packages/google-cloud-alloydb/google/cloud/alloydb_v1beta/services/alloy_db_admin/pagers.py | 1c442de8074c691d92fdefd2aa87e57390df9038 | [
"Apache-2.0"
] | permissive | scooter4j/google-cloud-python | dc7ae1ba6a33a62a40b617b806ec8ed723046b8b | 36b1cf08092d5c07c5971bb46edda7a9928166b1 | refs/heads/master | 2023-04-14T18:36:48.643436 | 2023-04-06T13:19:26 | 2023-04-06T13:19:26 | 188,338,673 | 0 | 0 | null | 2019-05-24T02:27:15 | 2019-05-24T02:27:14 | null | UTF-8 | Python | false | false | 20,951 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Iterator,
Optional,
Sequence,
Tuple,
)
from google.cloud.alloydb_v1beta.types import resources, service
class ListClustersPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__iter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListClustersResponse],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Cluster]:
for page in self.pages:
yield from page.clusters
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListClustersAsyncPager:
"""A pager for iterating through ``list_clusters`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListClustersResponse` object, and
provides an ``__aiter__`` method to iterate through its
``clusters`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListClusters`` requests and continue to iterate
through the ``clusters`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListClustersResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListClustersResponse]],
request: service.ListClustersRequest,
response: service.ListClustersResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListClustersRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListClustersResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListClustersRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListClustersResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Cluster]:
async def async_generator():
async for page in self.pages:
for response in page.clusters:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__iter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListInstancesResponse],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Instance]:
for page in self.pages:
yield from page.instances
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListInstancesAsyncPager:
"""A pager for iterating through ``list_instances`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse` object, and
provides an ``__aiter__`` method to iterate through its
``instances`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListInstances`` requests and continue to iterate
through the ``instances`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListInstancesResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListInstancesResponse]],
request: service.ListInstancesRequest,
response: service.ListInstancesResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListInstancesRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListInstancesResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListInstancesRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListInstancesResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Instance]:
async def async_generator():
async for page in self.pages:
for response in page.instances:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__iter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListBackupsResponse],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.Backup]:
for page in self.pages:
yield from page.backups
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListBackupsAsyncPager:
"""A pager for iterating through ``list_backups`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``backups`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListBackups`` requests and continue to iterate
through the ``backups`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListBackupsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListBackupsResponse]],
request: service.ListBackupsRequest,
response: service.ListBackupsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListBackupsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListBackupsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListBackupsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListBackupsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.Backup]:
async def async_generator():
async for page in self.pages:
for response in page.backups:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__iter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__iter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., service.ListSupportedDatabaseFlagsResponse],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[resources.SupportedDatabaseFlag]:
for page in self.pages:
yield from page.supported_database_flags
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
class ListSupportedDatabaseFlagsAsyncPager:
"""A pager for iterating through ``list_supported_database_flags`` requests.
This class thinly wraps an initial
:class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse` object, and
provides an ``__aiter__`` method to iterate through its
``supported_database_flags`` field.
If there are more pages, the ``__aiter__`` method will make additional
``ListSupportedDatabaseFlags`` requests and continue to iterate
through the ``supported_database_flags`` field on the
corresponding responses.
All the usual :class:`google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., Awaitable[service.ListSupportedDatabaseFlagsResponse]],
request: service.ListSupportedDatabaseFlagsRequest,
response: service.ListSupportedDatabaseFlagsResponse,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiates the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsRequest):
The initial request object.
response (google.cloud.alloydb_v1beta.types.ListSupportedDatabaseFlagsResponse):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = service.ListSupportedDatabaseFlagsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
async def pages(self) -> AsyncIterator[service.ListSupportedDatabaseFlagsResponse]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = await self._method(self._request, metadata=self._metadata)
yield self._response
def __aiter__(self) -> AsyncIterator[resources.SupportedDatabaseFlag]:
async def async_generator():
async for page in self.pages:
for response in page.supported_database_flags:
yield response
return async_generator()
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
| [
"[email protected]"
] | |
b1efe20d5ba4c2a9c279544113a1e2bd6cdf7018 | 2432996ac1615cd36d61f0feeff8a359d2b438d8 | /env/lib/python3.8/site-packages/_pyinstaller_hooks_contrib/hooks/stdhooks/hook-eth_hash.py | 1b22c286fe3f7300f269b0ec19044cd2c28cc11a | [
"GPL-1.0-or-later",
"GPL-2.0-or-later",
"GPL-2.0-only",
"Apache-2.0"
] | permissive | Parveshdhull/AutoTyper | dd65d53ece7c13fbc1ead7ce372947483e05e2e3 | 7fabb30e15b770d790b69c2e4eaf9bbf5a4d180c | refs/heads/main | 2023-05-08T14:10:35.404160 | 2023-05-07T20:43:15 | 2023-05-07T20:43:15 | 315,415,751 | 26 | 18 | Apache-2.0 | 2023-05-07T20:43:16 | 2020-11-23T19:13:05 | Python | UTF-8 | Python | false | false | 611 | py | # ------------------------------------------------------------------
# Copyright (c) 2020 PyInstaller Development Team.
#
# This file is distributed under the terms of the GNU General Public
# License (version 2.0 or later).
#
# The full license is available in LICENSE.GPL.txt, distributed with
# this software.
#
# SPDX-License-Identifier: GPL-2.0-or-later
# ------------------------------------------------------------------
from PyInstaller.utils.hooks import collect_submodules
# The ``eth_hash.utils.load_backend`` function does a dynamic import.
hiddenimports = collect_submodules('eth_hash.backends')
| [
"[email protected]"
] | |
90ebb27f00615a63b07c8ff1cd495f77293c88ea | 8f784ca91cd56818dc6e38d5e602756a913e13b4 | /modbus_tcp_server/network/accept_thread.py | a512980848dd5a91ed2ce730cf546634df5968c6 | [
"MIT"
] | permissive | smok-serwis/modbus-tcp-server | 9a02a3c5e9d0875179903bc4171b4d782d6d48b9 | 558eca908b6762280a74b16d78d56dc047a9dace | refs/heads/master | 2023-06-14T01:26:07.299860 | 2021-07-15T13:59:15 | 2021-07-15T13:59:15 | 339,780,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,383 | py | import socket
import typing as tp
from satella.coding import silence_excs
from satella.coding.concurrent import TerminableThread
from .conn_thread import ConnectionThread
from ..data_source import BaseDataSource, TestingDataSource
from ..datagrams import MODBUSTCPMessage
from ..processor import ModbusProcessor
class ModbusTCPServer(TerminableThread):
def __init__(self, bind_ifc: str, bind_port: int,
data_source: tp.Optional[BaseDataSource] = None,
backlog: int = 128):
super().__init__(name='accept')
if data_source is None:
data_source = TestingDataSource()
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind((bind_ifc, bind_port))
self.backlog = backlog
self.processor = ModbusProcessor(data_source)
def prepare(self) -> None:
self.socket.listen(self.backlog)
self.socket.setblocking(True)
self.socket.settimeout(5)
def process_message(self, msg: MODBUSTCPMessage) -> MODBUSTCPMessage:
return self.processor.process(msg)
def cleanup(self):
self.socket.close()
@silence_excs(socket.timeout)
def loop(self) -> None:
sock, addr = self.socket.accept()
ConnectionThread(sock, addr, self).start()
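if __name__ == "__main__":
    # Minimal manual-run sketch (illustrative; assumes satella's
    # TerminableThread start/terminate/join semantics and a free local port).
    import time

    server = ModbusTCPServer("127.0.0.1", 1502)  # falls back to TestingDataSource
    server.start()
    try:
        time.sleep(60.0)  # serve requests for a minute
    finally:
        server.terminate().join()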
| [
"[email protected]"
] | |
ebc97dabe6ba4cd2d87aca268755945115d291e2 | 3447227dd54587eb8c0c7f5346ac158504f7a907 | /compass/ocean/tests/global_ocean/threads_test/__init__.py | 42883b53b746d85a52e069468c8ae411ba7c414e | [
"LicenseRef-scancode-warranty-disclaimer",
"BSD-2-Clause"
] | permissive | MPAS-Dev/compass | 5e2c1525224dd399bcf4f56f661df05e2ec197a6 | 0b7440f0aa77c1ae052922a39e646bd35c267661 | refs/heads/main | 2023-08-30T20:59:52.052430 | 2023-08-29T09:45:14 | 2023-08-29T09:45:14 | 310,409,977 | 10 | 26 | NOASSERTION | 2023-09-13T14:19:16 | 2020-11-05T20:28:25 | Python | UTF-8 | Python | false | false | 2,046 | py | from compass.validate import compare_variables
from compass.ocean.tests.global_ocean.forward import ForwardTestCase, \
ForwardStep
class ThreadsTest(ForwardTestCase):
"""
A test case for performing two short forward runs to make sure the results
are identical with 1 and 2 threads per MPI process
"""
def __init__(self, test_group, mesh, init, time_integrator):
"""
Create test case
Parameters
----------
test_group : compass.ocean.tests.global_ocean.GlobalOcean
The global ocean test group that this test case belongs to
mesh : compass.ocean.tests.global_ocean.mesh.Mesh
The test case that produces the mesh for this run
init : compass.ocean.tests.global_ocean.init.Init
The test case that produces the initial condition for this run
time_integrator : {'split_explicit', 'RK4'}
The time integrator to use for the forward run
"""
super().__init__(test_group=test_group, mesh=mesh, init=init,
time_integrator=time_integrator,
name='threads_test')
for openmp_threads in [1, 2]:
name = f'{openmp_threads}thread'
step = ForwardStep(test_case=self, mesh=mesh, init=init,
time_integrator=time_integrator, name=name,
subdir=name, ntasks=4,
openmp_threads=openmp_threads)
step.add_output_file(filename='output.nc')
self.add_step(step)
# no run() method is needed
def validate(self):
"""
Test cases can override this method to perform validation of variables
and timers
"""
variables = ['temperature', 'salinity', 'layerThickness',
'normalVelocity']
compare_variables(test_case=self, variables=variables,
filename1='1thread/output.nc',
filename2='2thread/output.nc')
| [
"[email protected]"
] | |
ecfbc6fbd378e0f496251bdb6fea828ba8ec686d | 09d6a9e95f0156e577e068899f20959abb0f733a | /train/trainers/trainer_controller.py | 483cbb084ec931370e279710580127352d459a36 | [] | no_license | miyosuda/animalai | 45267cd6dc63306e97f28d2217046f9e10b5b460 | 94d9d5e3acc593da878fa3dc3f38348567417578 | refs/heads/master | 2020-07-10T06:31:29.100908 | 2020-01-17T19:26:34 | 2020-01-17T19:26:34 | 204,193,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,122 | py | # -*- coding: utf-8 -*-
# # Unity ML-Agents Toolkit
# ## ML-Agent Learning
"""Launches trainers for each External Brains in a Unity Environment."""
import os
import logging
import shutil
import sys
from typing import *
import numpy as np
import tensorflow as tf
from animalai.envs import BrainInfo
from animalai.envs.exception import UnityEnvironmentException
from trainers.ppo.trainer import PPOTrainer
class TrainerController(object):
def __init__(self,
model_path: str,
summaries_dir: str,
run_id: str,
save_freq: int,
load: bool,
train: bool,
keep_checkpoints: int,
lesson: Optional[int],
external_brains: Dict[str, BrainInfo],
training_seed: int,
config=None):
"""
Arguments:
model_path:
Path to save the model.
summaries_dir:
Folder to save training summaries.
run_id:
The sub-directory name for model and summary statistics
save_freq:
Frequency at which to save model
load:
Whether to load the model or randomly initialize.
train:
Whether to train model, or only run inference.
keep_checkpoints:
How many model checkpoints to keep.
lesson:
Start learning from this lesson.
external_brains:
dictionary of external brain names to BrainInfo objects.
training_seed:
Seed to use for Numpy and Tensorflow random number generation.
"""
self.model_path = model_path
self.summaries_dir = summaries_dir
self.external_brains = external_brains
self.external_brain_names = external_brains.keys()
self.logger = logging.getLogger('mlagents.envs')
self.run_id = run_id
self.save_freq = save_freq
self.lesson = lesson
self.load_model = load
self.train_model = train
self.keep_checkpoints = keep_checkpoints
self.trainers = {}
self.global_step = 0
self.seed = training_seed
self.config = config
self.update_config = True
np.random.seed(self.seed)
tf.set_random_seed(self.seed)
def _get_measure_vals(self):
return None
def _save_model(self, steps=0):
"""
Saves current model to checkpoint folder.
steps:
Current number of steps in training process.
saver:
Tensorflow saver for session.
"""
for brain_name in self.trainers.keys():
self.trainers[brain_name].save_model()
self.logger.info('Saved Model')
def _save_model_when_interrupted(self, steps=0):
self.logger.info('Learning was interrupted. Please wait '
'while the graph is generated.')
self._save_model(steps)
def initialize_trainers(self, trainer_config):
"""
Initialization of the trainers
trainer_config:
The configurations of the trainers
"""
trainer_parameters_dict = {}
for brain_name in self.external_brains:
# brain_name is "Learner"
trainer_parameters = trainer_config['default'].copy()
trainer_parameters['summary_path'] = '{basedir}/{name}'.format(
basedir=self.summaries_dir,
name=str(self.run_id) + '_' + brain_name)
trainer_parameters['model_path'] = '{basedir}/{name}'.format(
basedir=self.model_path,
name=brain_name)
trainer_parameters['keep_checkpoints'] = self.keep_checkpoints
if brain_name in trainer_config:
_brain_key = brain_name # "Learner"
while not isinstance(trainer_config[_brain_key], dict):
_brain_key = trainer_config[_brain_key]
for k in trainer_config[_brain_key]:
trainer_parameters[k] = trainer_config[_brain_key][k]
trainer_parameters_dict[brain_name] = trainer_parameters.copy()
for brain_name in self.external_brains:
if trainer_parameters_dict[brain_name]['trainer'] == 'ppo':
# Create the PPOTrainer here
self.trainers[brain_name] = PPOTrainer(
self.external_brains[brain_name],
0,
trainer_parameters_dict[brain_name], # contents specified in trainer_config
self.train_model,
self.load_model,
self.seed,
self.run_id)
else:
raise UnityEnvironmentException('The trainer config contains '
'an unknown trainer type for '
'brain {}'
.format(brain_name))
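# Illustrative shape of the trainer_config consumed above (an assumption based
# on the keys this method reads, not an authoritative schema):
#
#   trainer_config = {
#       'default': {
#           'trainer': 'ppo',
#           'summary_freq': 1000,
#           # ... further PPO hyperparameters ...
#       },
#       'Learner': {           # per-brain override; may also be an alias string
#           'trainer': 'ppo',
#       },
#   }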
@staticmethod
def _create_model_path(model_path):
try:
if not os.path.exists(model_path):
os.makedirs(model_path)
except Exception:
raise UnityEnvironmentException('The folder {} containing the '
'generated model could not be '
'accessed. Please make sure the '
'permissions are set correctly.'
.format(model_path))
def _reset_env(self, env):
"""Resets the environment.
Returns:
A Data structure corresponding to the initial reset state of the
environment.
"""
if self.update_config:
self.update_config = False
return env.reset(arenas_configurations=self.config)
else:
return env.reset()
def start_learning(self, env, trainer_config):
# TODO: Should be able to start learning at different lesson numbers
# for each curriculum.
self._create_model_path(self.model_path)
tf.reset_default_graph()
# Prevent a single session from taking all GPU memory.
# Create the PPOTrainer for each external brain
self.initialize_trainers(trainer_config)
for _, t in self.trainers.items():
self.logger.info(t)
curr_info = self._reset_env(env)
# Log the hyperparameters to Tensorboard
if self.train_model:
for brain_name, trainer in self.trainers.items():
trainer.write_tensorboard_text('Hyperparameters', trainer.parameters)
try:
# Training loop
while any([t.get_step <= t.get_max_steps for k, t in self.trainers.items()]) \
or not self.train_model:
# Advance training by one environment step
new_info = self.take_step(env, curr_info)
self.global_step += 1
if self.global_step % self.save_freq == 0 and self.global_step != 0 \
and self.train_model:
# Save the model checkpoint
self._save_model(steps=self.global_step)
curr_info = new_info
if self.global_step != 0 and self.train_model:
# Save the model one last time
self._save_model(steps=self.global_step)
except KeyboardInterrupt:
if self.train_model:
self._save_model_when_interrupted(steps=self.global_step)
pass
env.close()
def take_step(self, env, curr_info):
# If any lessons were incremented or the environment is ready to be reset
if env.global_done:
curr_info = self._reset_env(env)
for brain_name, trainer in self.trainers.items():
trainer.end_episode()
# Decide and take an action
take_action_vector, \
take_action_memories, \
take_action_text, \
take_action_value, \
take_action_outputs \
= {}, {}, {}, {}, {}
for brain_name, trainer in self.trainers.items():
# Decide the actions; each structure holds one entry per arena.
(take_action_vector[brain_name], # actions to issue
take_action_memories[brain_name], # None (used when use_recurrent is enabled)
take_action_text[brain_name], # always None
take_action_value[brain_name], # one value per arena
take_action_outputs[brain_name]) = trainer.take_action(curr_info)
# take_action_outputs holds action, log_probs, value, entropy, etc.
# Step the environment once with the chosen actions
new_info = env.step(vector_action=take_action_vector,
memory=take_action_memories,
text_action=take_action_text,
value=take_action_value)
# BrainInfo carries visual_observations, vector_observations, memories, rewards, local_done,
# etc.
# e.g. visual_observations has shape (4, 84, 84, 3)
for brain_name, trainer in self.trainers.items():
# Accumulate the transition into the experience buffer
trainer.add_experiences(curr_info, new_info, take_action_outputs[brain_name])
trainer.process_experiences(curr_info, new_info)
if trainer.is_ready_update() and self.train_model \
and trainer.get_step <= trainer.get_max_steps:
# Update the policy with SGD on the contents of the experience buffer.
trainer.update_policy()
# Write training statistics to Tensorboard.
trainer.write_summary(self.global_step)
if self.train_model and trainer.get_step <= trainer.get_max_steps:
trainer.increment_step_and_update_last_reward()
return new_info
| [
"[email protected]"
] | |
631a2dcb65f7b01f394a4887810810476c69ec19 | 933376c11498a6567da8d7eb7d2675100895c3ba | /pyzoo/zoo/chronos/forecaster/tcn_forecaster.py | 1d2359d1cc2e54a9820e4f91c65c4ff5cd87761b | [
"Apache-2.0"
] | permissive | intel-analytics/analytics-zoo | 320a461765f86d41dd456b598b1cf1d51d57f4c4 | 7cc3e2849057d6429d03b1af0db13caae57960a5 | refs/heads/master | 2023-08-13T20:47:58.621714 | 2023-07-06T00:49:11 | 2023-07-06T00:49:11 | 90,328,920 | 3,104 | 996 | Apache-2.0 | 2023-09-06T01:51:18 | 2017-05-05T02:27:30 | Jupyter Notebook | UTF-8 | Python | false | false | 5,894 | py | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.chronos.forecaster.base_forecaster import BasePytorchForecaster
from zoo.chronos.forecaster.utils import set_pytorch_seed
from zoo.chronos.model.tcn import TCNPytorch
from zoo.chronos.model.tcn import model_creator, optimizer_creator, loss_creator
class TCNForecaster(BasePytorchForecaster):
"""
Example:
>>> #The dataset is split into x_train, x_val, x_test, y_train, y_val, y_test
>>> forecaster = TCNForecaster(past_seq_len=24,
future_seq_len=5,
input_feature_num=1,
output_feature_num=1,
...)
>>> forecaster.fit((x_train, y_train))
>>> forecaster.to_local() # if you set distributed=True
>>> test_pred = forecaster.predict(x_test)
>>> test_eval = forecaster.evaluate((x_test, y_test))
>>> forecaster.save({ckpt_name})
>>> forecaster.restore({ckpt_name})
"""
def __init__(self,
past_seq_len,
future_seq_len,
input_feature_num,
output_feature_num,
num_channels=[30]*7,
kernel_size=3,
repo_initialization=True,
dropout=0.1,
optimizer="Adam",
loss="mse",
lr=0.001,
metrics=["mse"],
seed=None,
distributed=False,
workers_per_node=1,
distributed_backend="torch_distributed"):
"""
Build a TCN Forecast Model.
TCN Forecast may fall into local optima. Please set repo_initialization
to False to alleviate the issue. You can also change a random seed to
work around it.
:param past_seq_len: Specify the history time steps (i.e. lookback).
:param future_seq_len: Specify the output time steps (i.e. horizon).
:param input_feature_num: Specify the feature dimension.
:param output_feature_num: Specify the output dimension.
:param num_channels: Specify the convolutional layer filter number in
TCN's encoder. This value defaults to [30]*7.
:param kernel_size: Specify convolutional layer filter height in TCN's
encoder. This value defaults to 3.
:param repo_initialization: if to use framework default initialization,
True to use paper author's initialization and False to use the
framework's default initialization. The value defaults to True.
:param dropout: Specify the dropout close possibility (i.e. the close
possibility to a neuron). This value defaults to 0.1.
:param optimizer: Specify the optimizer used for training. This value
defaults to "Adam".
:param loss: Specify the loss function used for training. This value
defaults to "mse". You can choose from "mse", "mae" and
"huber_loss".
:param lr: Specify the learning rate. This value defaults to 0.001.
:param metrics: A list contains metrics for evaluating the quality of
forecasting. You may only choose from "mse" and "mae" for a
distributed forecaster. You may choose from "mse", "me", "mae",
"mse","rmse","msle","r2", "mpe", "mape", "mspe", "smape", "mdape"
and "smdape" for a non-distributed forecaster.
:param seed: int, random seed for training. This value defaults to None.
:param distributed: bool, if init the forecaster in a distributed
fashion. If True, the internal model will use an Orca Estimator.
If False, the internal model will use a pytorch model. The value
defaults to False.
:param workers_per_node: int, the number of worker you want to use.
The value defaults to 1. The param is only effective when
distributed is set to True.
:param distributed_backend: str, select from "torch_distributed" or
"horovod". The value defaults to "torch_distributed".
"""
# config setting
self.data_config = {
"past_seq_len": past_seq_len,
"future_seq_len": future_seq_len,
"input_feature_num": input_feature_num,
"output_feature_num": output_feature_num
}
self.config = {
"lr": lr,
"loss": loss,
"num_channels": num_channels,
"kernel_size": kernel_size,
"repo_initialization": repo_initialization,
"optim": optimizer,
"dropout": dropout
}
# model creator settings
self.local_model = TCNPytorch
self.model_creator = model_creator
self.optimizer_creator = optimizer_creator
self.loss_creator = loss_creator
# distributed settings
self.distributed = distributed
self.distributed_backend = distributed_backend
self.workers_per_node = workers_per_node
# other settings
self.lr = lr
self.metrics = metrics
self.seed = seed
super().__init__()
| [
"[email protected]"
] | |
92296cabb36cdc43ac8a55f79c416c6d3190cc2b | f332244831040530c8d4d3ff42ee4e06078ca22b | /cart/views.py | eed5c0e9c50a9e6889b30cec001caa5258639029 | [] | no_license | worlddeleteRin/cosmetics | f8f1bd8a3d9b6b149ae29126fa6f4bd6bb5e72b1 | f7d593f3206606d24084d6281bd6d5472654da25 | refs/heads/master | 2023-03-04T04:34:59.349269 | 2021-02-13T19:43:37 | 2021-02-13T19:43:37 | 296,117,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,561 | py | from django.shortcuts import render
from django.http import HttpResponse, HttpResponse, HttpResponseRedirect, JsonResponse
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from .models import *
from products.models import *
from collections import defaultdict
import pandas as pd
import urllib.parse
# to serialize to json format
from django.core import serializers
from django.core.mail import send_mail
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.html import strip_tags
# Create your views here.
def index(request):
allcategories = Category.objects.all()
allbrands = Brand.objects.all()
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(
session_key = current_session_key
)
cart_items = cart[0].item_set.all()
return render(request, 'cart/index.html', {
'allbrands': allbrands,
'categories': allcategories,
'session_key': current_session_key,
'items': cart_items,
'current_cart': cart[0],
})
def add_item(request, product_id):
product = Product.objects.get(id = product_id)
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
cart_items = cart.item_set.all()
if Item.objects.filter(cart = cart, name = product.name).exists():
current_item = Item.objects.get(cart = cart, name = product.name)
current_item.quantity += 1
current_item.save()
else:
new_item = Item(
cart = cart,
name = product.name,
price = product.price,
sale_price = product.sale_price,
imgurl = product.imgurl,
brand = product.pr_brand.name,
series = product.pr_series.name,
obiem = product.obiem,
)
new_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def add_quantity(request, item_id):
current_item = Item.objects.get(id = item_id)
current_item.quantity += 1
current_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def remove_quantity(request, item_id):
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
current_item = Item.objects.get(id = item_id)
if current_item.quantity == 1:
current_item.delete()
else:
current_item.quantity -= 1
current_item.save()
return HttpResponseRedirect(reverse('cart:index'))
def remove_item_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
if current_item.quantity == 1:
current_item.delete()
check_items = cart.if_items_empty()
if check_items == True:
cart.promo = None
cart.save()
return JsonResponse({
'message': 'everything is ok',
'need_delete': 'yes',
}, status = 200)
else:
current_item.quantity -= 1
current_item.save()
quantity = current_item.quantity
return JsonResponse({
'message': 'everything is ok',
'need_delete': 'no',
'quantity': quantity,
}, status = 200)
def add_item_ajax(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
current_item.quantity += 1
current_item.save()
quantity = current_item.quantity
return JsonResponse({
'message': 'everything is ok',
'quantity': quantity,
}, status = 200)
def update_item_amount_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
item_id = request.GET['item_id']
current_item = Item.objects.get(id = item_id, cart = cart)
if current_item.sale_price:
amount = current_item.quantity * current_item.sale_price
else:
amount = current_item.quantity * current_item.price
return JsonResponse({
'message': 'everything is ok',
'item_amount': amount,
}, status = 200)
def update_total_amount_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
total_amount = cart.get_total()
if cart.promo != None:
total_amount_promo = cart.get_total_promo()
has_promo = 'true'
return JsonResponse({
'total_amount_promo': total_amount_promo,
'message': 'everything is ok',
'total_amount': total_amount,
'has_promo': has_promo,
}, status = 200)
else:
has_promo = 'false'
return JsonResponse({
'message': 'everything is ok',
'total_amount': total_amount,
'has_promo': has_promo,
}, status = 200)
def remove_item_from_cart_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
current_item_id = request.GET['item_id']
print('item id is',current_item_id )
current_item = Item.objects.get(cart = cart, id = current_item_id)
print(current_item)
current_item.delete()
check_items = cart.if_items_empty()
if check_items == True:
cart.promo = None
cart.save()
return JsonResponse({
'message': 'everything is ok',
}, status = 200)
def add_to_cart_ajax(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
current_product_id = request.GET['product_id']
current_product = Product.objects.get(id = current_product_id)
message = ""
if Item.objects.filter(cart = cart, name = current_product.name,
price = current_product.price).exists():
item = Item.objects.get(cart = cart, name = current_product.name,
price = current_product.price)
item.quantity += 1
item.save()
else:
item = Item(
cart = cart,
name = current_product.name,
price = current_product.price,
sale_price = current_product.sale_price,
imgurl = current_product.imgurl,
brand = current_product.pr_brand.name,
series = current_product.pr_series.name,
obiem = current_product.obiem,
)
item.save()
return JsonResponse({
        'message': 'Товар добавлен в корзину!',  # "Item added to cart!"
}, status = 200)
def create_order_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
cart_items = cart.item_set.all()
# parse cart info
delivery_method = request.GET['delivery_method']
delivery_method = urllib.parse.unquote(delivery_method)
delivery_cost = request.GET['delivery_cost']
payment_method = request.GET['payment_method']
payment_method = urllib.parse.unquote(payment_method)
customer_name = request.GET['customer_name']
customer_name = urllib.parse.unquote(customer_name)
customer_phone = request.GET['customer_phone']
customer_city = request.GET['customer_city']
customer_city = urllib.parse.unquote(customer_city)
customer_address = request.GET['customer_address']
customer_address = urllib.parse.unquote(customer_address)
order_comment = request.GET['cart_comment']
order_comment = urllib.parse.unquote(order_comment)
customer_email = request.GET['customer_email']
customer_email = urllib.parse.unquote(customer_email)
order_price = int(delivery_cost) + cart.get_total_promo()
new_order = Orders(
name = customer_name,
phone = customer_phone,
email = customer_email,
delivery = delivery_method + ' ' + delivery_cost,
payment = payment_method,
city = customer_city,
address = customer_address,
order_price = order_price,
comment = order_comment,
)
new_order.save()
cart_items_mail = []
order_price_mail = order_price
order_comment_mail = order_comment
customer_address_mail = customer_city + ', ' + customer_address
delivery_method_mail = delivery_method
order_id = new_order.id
for item in cart_items:
new_order.item_set.add(item)
cart_items_mail.append([item.name, item.quantity, item.price])
for item in cart.item_set.all():
cart.item_set.remove(item)
cart.promo = None
cart.save()
cart_items_all = new_order.item_set.all()
context = {
'order_id': order_id,
'order_price_mail': order_price_mail,
'name': customer_name,
'phone': customer_phone,
'email': customer_email,
'delivery_address': customer_address_mail,
'delivery_cost': delivery_cost,
'cart_items_all': cart_items_all,
'delivery_method_mail': delivery_method_mail,
'order_comment_mail': order_comment_mail,
}
client_html_message = render_to_string('cart/blocks/order_mail_template.html', context)
client_html_message_plain = strip_tags(client_html_message)
admin_html_message = render_to_string('cart/blocks/order_mail_template_admin.html', context)
admin_html_message_plain = strip_tags(admin_html_message)
try:
send_mail(
            'Заказ № {}'.format(order_id),  # "Order No. {}"
admin_html_message_plain,
settings.EMAIL_HOST_USER,
[
# '[email protected]',
'[email protected]'
],
html_message = admin_html_message
)
        print('admin mail has been sent')
        print('trying to send client mail')
send_mail(
            'Заказ № {}'.format(order_id),  # "Order No. {}"
client_html_message_plain,
settings.EMAIL_HOST_USER,
[
customer_email,
# '[email protected]'
],
html_message = client_html_message
)
except:
        print('there was an error when sending mail')
return JsonResponse({
'order_created': 'yes',
}, status = 200)
def update_nav_total(request):
if not request.session.session_key:
request.session.create()
current_session_key = request.session.session_key
else:
current_session_key = request.session.session_key
cart = Cart.objects.get_or_create(session_key = current_session_key)[0]
cart_total = cart.get_total()
return JsonResponse({
'cart_total': cart_total,
}, status = 200)
def check_promo_ajax(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
current_promo = request.GET['promo']
if Promocode.objects.filter(name = current_promo).exists():
        print('this promo exists')
promo = Promocode.objects.get(name = current_promo)
cart.promo = promo
cart.save()
return JsonResponse({
'exist': 'yes',
}, status = 200)
else:
        print('this promo does not exist')
return JsonResponse({
'exist': 'no',
}, status = 200)
def set_promo(request):
current_session_key = request.session.session_key
cart = Cart.objects.get(session_key = current_session_key)
if cart.promo != None:
print('promo exist')
promo_name = cart.promo.name
return JsonResponse({
'promo_name': promo_name,
'exist': 'yes'
}, status = 200)
else:
return JsonResponse({
'exist': 'no'
}, status = 200)
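# --- Editor's sketch (not part of the original views): the reverse('cart:index') calls and the
# --- AJAX views above assume URL patterns registered under app_name = 'cart'. A minimal cart/urls.py
# --- could look like the following; the route strings and names are illustrative, not confirmed.
#
# from django.urls import path
# from . import views
#
# app_name = 'cart'
# urlpatterns = [
#     path('add-item-ajax/', views.add_item_ajax, name='add_item_ajax'),
#     path('remove-item-ajax/', views.remove_item_ajax, name='remove_item_ajax'),
#     path('create-order-ajax/', views.create_order_ajax, name='create_order_ajax'),
#     path('check-promo-ajax/', views.check_promo_ajax, name='check_promo_ajax'),
# ]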
| [
"[email protected]"
] | |
98166df402980f456d8048e29aa8a450f9257655 | 80d879a552ce00a9bc73a26d0ddb74c278867b1f | /scripts/080_hilo_concrete.py | 4abf39886121d03650f95582dad542dc8c6f5d56 | [] | no_license | whiskyching/WS-EscobedoGroup | 4a25abe62fac91b82d3b1abd74ddc02af107457f | bd36d623ec2f60638fe3f330b9ad92c810804e8d | refs/heads/main | 2023-03-20T07:03:19.594765 | 2021-03-16T13:15:14 | 2021-03-16T13:15:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,119 | py | import os
import compas
from compas.utilities import pairwise
from compas_rhino.artists import MeshArtist
HERE = os.path.dirname(__file__)
DATA = os.path.join(HERE, '../data')
FILE = os.path.join(DATA, 'session.json')
session = compas.json_load(FILE)
mesh = session['mesh']
# ==============================================================================
# Idos
# ==============================================================================
idos = mesh.copy()
for face in mesh.faces_where({'is_loaded': False}):
idos.delete_face(face)
idos.remove_unused_vertices()
offset = 0.02
for vertex, attr in idos.vertices(True):
x, y, z = mesh.vertex_coordinates(vertex)
nx, ny, nz = mesh.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Edos
# ==============================================================================
edos = idos.copy()
offset = 0.06
for vertex, attr in edos.vertices(True):
x, y, z = idos.vertex_coordinates(vertex)
nx, ny, nz = idos.vertex_normal(vertex)
if attr['nx'] is not None:
nx = attr['nx']
if attr['ny'] is not None:
ny = attr['ny']
if attr['nz'] is not None:
nz = attr['nz']
attr['x'] = x + offset * nx
attr['y'] = y + offset * ny
attr['z'] = z + offset * nz
# ==============================================================================
# Volume
# ==============================================================================
volume = idos.copy()
volume.flip_cycles()
max_vertex = volume._max_vertex + 1
max_face = volume._max_face + 1
for vertex, attr in edos.vertices(True):
volume.add_vertex(key=vertex + max_vertex, **attr)
for face in edos.faces():
vertices = edos.face_vertices(face)
vertices = [vertex + max_vertex for vertex in vertices]
volume.add_face(vertices)
boundary = edos.vertices_on_boundary()
boundary.append(boundary[0])
for a, b in pairwise(boundary):
volume.add_face([b, a, a + max_vertex, b + max_vertex])
# ==============================================================================
# Export
# ==============================================================================
session['idos'] = idos
session['edos'] = edos
session['volume'] = volume
compas.json_dump(session, FILE)
# ==============================================================================
# visualize
# ==============================================================================
artist = MeshArtist(idos, layer="HiLo::Concrete1::Idos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(255, 0, 0))
artist = MeshArtist(edos, layer="HiLo::Concrete1::Edos")
artist.clear_layer()
artist.draw_mesh(disjoint=True, color=(0, 0, 255))
artist = MeshArtist(volume, layer="HiLo::Concrete1::Volume")
artist.clear_layer()
artist.draw_mesh(disjoint=True)
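# --- Editor's note (not part of the original script): the session exported above can be read back
# --- with the same helper used at the top of this file, e.g.
# session = compas.json_load(FILE)
# idos, edos, volume = session['idos'], session['edos'], session['volume']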
| [
"[email protected]"
] | |
4670ba9b785563921ebd4e8eb26fa337062abb5b | 1625edfe28b4b0979fd32b4a3c5e55249a993fd5 | /baekjoon14915.py | 7648498a85fccf5a369e7197408b17d1726a754d | [] | no_license | beOk91/baekjoon2 | b8bf504c506c6278899d4107ecfe51974ef13f5e | 39569f8effb8e32405a7d74d98bdabcab783ec56 | refs/heads/master | 2023-05-11T20:11:19.015113 | 2020-09-14T23:58:49 | 2020-09-14T23:58:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | m,n=map(int,input().strip().split())
def conversion(m,n):  # converts m to base n (digits up to base 16)
c="0123456789ABCDEF"
if m<n:
return str(c[m])
else:
return conversion(m//n,n)+str(c[m%n])
print(conversion(m,n))
| [
"[email protected]"
] | |
5e2e9ee1d976ed4b9dae0c19f9e48d49c14d8d4a | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/Services/Dashboard/DashboardAPI.py | 9f90e4842ae59431378744395dc3404a30601661 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | #!/usr/bin/python
"""
This is the Dashboard API Module for the Worker Node
"""
from WMCore.Services.Dashboard import apmon
import time
from types import DictType, StringType, ListType
#
# Methods for manipulating the apmon instance
#
# Internal attributes
apmonInstance = None
apmonInit = False
# Monalisa configuration
apmonConf = ["cms-wmagent-job.cern.ch"]
#
# Method to create a single apmon instance at a time
#
def getApmonInstance( logr, apmonServer ):
global apmonInstance
global apmonInit
if apmonInstance is None and not apmonInit :
apmonInit = True
if apmonInstance is None :
try :
if not apmonServer:
apmonInstance = apmon.ApMon(apmonConf, logr) #apmonLoggingLevel)
else:
apmonInstance = apmon.ApMon(apmonServer, logr)
except Exception, e :
pass
return apmonInstance
#
# Method to free the apmon instance
#
def apmonFree() :
global apmonInstance
global apmonInit
if apmonInstance is not None :
try :
apmonInstance.free()
except Exception, e :
pass
apmonInstance = None
apmonInit = False
#
# Method to send params to Monalisa service
#
def apmonSend(taskid, jobid, params, logr, apmonServer) :
apm = getApmonInstance( logr, apmonServer )
if apm is not None :
if not isinstance(params, DictType) and not isinstance(params, ListType) :
params = {'unknown' : '0'}
if not isinstance(taskid, StringType) :
taskid = 'unknown'
if not isinstance(jobid, StringType) :
jobid = 'unknown'
try :
apm.sendParameters(taskid, jobid, params)
return 0
except Exception, e:
pass
return 1
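#
# Example (editor's sketch, not part of the original module): how the helpers above might be
# called from agent-side code. The task/job names and parameter keys are illustrative only.
#
# import logging
# logr = logging.getLogger('dashboard')
# params = {'ExeStart': 'cmsRun', 'NEventsProcessed': '0'}
# apmonSend('wmagent_example_workflow', 'job_0001', params, logr, apmonConf)
# apmonFree()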
| [
"sfoulkes@4525493e-7705-40b1-a816-d608a930855b"
] | sfoulkes@4525493e-7705-40b1-a816-d608a930855b |
abe1005bd1d0c5882d3e588d9d3a1e4a7486c579 | 44197b58b52349b0557f4d2327be292d1c01ea50 | /test/test_data_62.py | 2dd9de7ce8a273d4da81d28b4534861d76aaff37 | [] | no_license | jonpurdy/netbox-swagger-python-client | 58b2b7984ea24a690d8910f6a6a496b99e5098f9 | 6bfe8cf3bb753c4d293dd56a541fac026642207f | refs/heads/master | 2021-06-28T03:16:09.670793 | 2017-09-17T18:15:54 | 2017-09-17T18:15:54 | 103,851,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 861 | py | # coding: utf-8
"""
NetBox API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.data_62 import Data62
class TestData62(unittest.TestCase):
""" Data62 unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testData62(self):
"""
Test Data62
"""
# FIXME: construct object with mandatory attributes with example values
#model = swagger_client.models.data_62.Data62()
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
601e04d3f95736775c8e3eee23c2ea0fc2a6192b | 216ddf61c5be758efde2b50fa476ada5354aced5 | /galaxy/gen_test.py | dddbbbb2c8f96cf24df4b8d0981a9c43604dbf60 | [] | no_license | cameronfabbri/ICGANs | 4600020238d6884b710ea0b035b84e86c73705f1 | d6be1a3e752959754be1dbf8af2ead8f75048b37 | refs/heads/master | 2021-01-16T18:11:38.596295 | 2017-11-26T22:35:16 | 2017-11-26T22:35:16 | 100,050,914 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,823 | py | '''
Generates a dataset of encodings from real images using the trained encoder.
'''
import matplotlib.pyplot as plt
from tqdm import tqdm
from matplotlib.pyplot import cm
import scipy.misc as misc
import tensorflow as tf
import tensorflow.contrib.layers as tcl
import cPickle as pickle
import numpy as np
import argparse
import random
import ntpath
import glob
import time
import sys
import cv2
import os
sys.path.insert(0, '../ops/')
from tf_ops import *
import data_ops
from nets import *
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--CHECKPOINT_DIR', required=True,help='checkpoint directory',type=str)
parser.add_argument('--DATASET', required=False,help='The DATASET to use', type=str,default='celeba')
parser.add_argument('--DATA_DIR', required=False,help='Directory where data is', type=str,default='./')
parser.add_argument('--OUTPUT_DIR', required=False,help='Directory to save data', type=str,default='./')
parser.add_argument('--ACTIVATION', required=False,help='Activation function', type=str,default='lrelu')
a = parser.parse_args()
CHECKPOINT_DIR = a.CHECKPOINT_DIR
DATASET = a.DATASET
DATA_DIR = a.DATA_DIR
OUTPUT_DIR = a.OUTPUT_DIR
ACTIVATION = a.ACTIVATION
try: os.makedirs(OUTPUT_DIR)
except: pass
# placeholders for data going into the network
global_step = tf.Variable(0, name='global_step', trainable=False)
images = tf.placeholder(tf.float32, shape=(1, 64, 64, 3), name='images')
encoded = encZ(images, ACTIVATION)
saver = tf.train.Saver(max_to_keep=1)
init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
sess = tf.Session()
sess.run(init)
# restore previous model if there is one
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
print "Restoring previous model..."
try:
saver.restore(sess, ckpt.model_checkpoint_path)
print "Model restored"
except:
print "Could not restore model"
pass
print 'Loading data...'
    # load train/test galaxy images, annotations, and the corresponding image paths
train_images, train_annots, test_images, test_annots, paths = data_ops.load_galaxy(DATA_DIR)
test_len = len(test_annots)
print 'test num:',test_len
info = {}
# want to write out a file with the image path and z vector
for p,img,label in tqdm(zip(paths, test_images, test_annots)):
img = data_ops.normalize(img)
batch_images = np.expand_dims(img, 0)
encoding = sess.run([encoded], feed_dict={images:batch_images})[0][0]
info[p] = [encoding, label]
# write out dictionary to pickle file
p = open(OUTPUT_DIR+'data.pkl', 'wb')
data = pickle.dumps(info)
p.write(data)
p.close()
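    # --- Editor's note (not part of the original script): the pickle written above could be read
    # --- back later along these lines (Python 2, matching the cPickle import used here):
    # with open(OUTPUT_DIR+'data.pkl', 'rb') as pf:
    #     info = pickle.loads(pf.read())
    # for path, (encoding, label) in info.items():
    #     pass  # encoding is the z vector from encZ, label the galaxy attribute vector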
| [
"[email protected]"
] | |
49d91b038609858a956b8fc002568e272efc77f6 | f7c3b1f65cc4d6dba1dc9dcf70f523497a14d791 | /mujoco/tf_commons/ops.py | ff3b576def5ff27fcc5369b938fb8a9e04891c09 | [
"MIT"
] | permissive | hiwonjoon/ICML2019-TREX | b9cac1ac5d97b22374a92f2f3cf5d8956cdb2482 | 44f92b61ca6c79ac22d468382d4f2fbee164fb7a | refs/heads/master | 2021-06-16T15:52:33.325054 | 2021-04-10T04:57:24 | 2021-04-10T04:57:24 | 184,654,702 | 76 | 24 | MIT | 2020-01-28T22:11:19 | 2019-05-02T21:36:24 | Python | UTF-8 | Python | false | false | 19,966 | py | import tensorflow as tf
class Conv2d(object) :
def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NCHW',padding='SAME') :
with tf.variable_scope(name) :
assert(data_format == 'NCHW' or data_format == 'NHWC')
self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
if( data_format == 'NCHW' ) :
self.strides = [1, 1, d_h, d_w]
else :
self.strides = [1, d_h, d_w, 1]
self.data_format = data_format
self.padding = padding
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( self.data_format =='NCHW' ) :
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,
use_cudnn_on_gpu=True,data_format='NCHW',
strides=self.strides, padding=self.padding),
b,data_format='NCHW',name=name)
else :
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,data_format='NHWC',
strides=self.strides, padding=self.padding),
b,data_format='NHWC',name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormConv2d(object):
def __init__(self,name,input_dim,output_dim,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NHWC',padding='SAME',epsilon=1e-9) :
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.strides = [1, d_h, d_w, 1]
self.padding = padding
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,2])
t = tf.nn.conv2d(input_var,v_norm,self.strides,self.padding,data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,1,tf.shape(self.v)[-1]]) * tf.nn.l2_normalize(self.v,axis=[0,1,2])
return tf.nn.bias_add(
tf.nn.conv2d(input_var, w,data_format='NHWC',
strides=self.strides, padding=self.padding),
self.b,data_format='NHWC',name=name)
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class DepthConv2d(object) :
def __init__(self,name,input_dim,channel_multiplier,k_h=4,k_w=4,d_h=2,d_w=2,
stddev=0.02, data_format='NCHW', padding='SAME') :
with tf.variable_scope(name) :
assert(data_format == 'NCHW' or data_format == 'NHWC')
self.w = tf.get_variable('w', [k_h, k_w, input_dim, channel_multiplier],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[input_dim*channel_multiplier], initializer=tf.constant_initializer(0.0))
if( data_format == 'NCHW' ) :
self.strides = [1, 1, d_h, d_w]
else :
self.strides = [1, d_h, d_w, 1]
self.data_format = data_format
self.padding = padding
def __call__(self,input_var,name=None,**xargs) :
return tf.nn.bias_add(
tf.nn.depthwise_conv2d(input_var, self.w,
data_format=self.data_format,
strides=self.strides, padding=self.padding),
self.b,data_format=self.data_format,name=name)
class Conv3d(object) :
def __init__(self,name,input_dim,output_dim,k_t=2,k_h=4,k_w=4,d_t=1,d_h=1,d_w=1,
stddev=0.02, data_format='NDHWC') :
with tf.variable_scope(name) :
assert(data_format == 'NDHWC')
self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.strides = [d_t,d_h,d_w]
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
#k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
#_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(input_var, w,
strides=self.strides,
data_format='NDHWC',
padding='SAME'),
b,name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class DilatedConv3D(object) :
def __init__(self,name,input_dim,output_dim,k_t=2,k_h=3,k_w=3,d_t=2,d_h=1,d_w=1,
stddev=0.02, data_format='NDHWC') :
with tf.variable_scope(name) :
assert(data_format == 'NDHWC')
self.w = tf.get_variable('w', [k_t, k_h, k_w, input_dim, output_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.strides = [1,1,1]
self.dilates = [d_t, d_h, d_w]
def __call__(self,input_var,name=None) :
k_t,k_h,k_w,_,_ = self.w.get_shape().as_list()
_t = tf.pad(input_var, [[0,0],[0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0]], "SYMMETRIC")
return tf.nn.bias_add(
tf.nn.convolution(_t, self.w,
strides=self.strides, dilation_rate=self.dilates,
padding='VALID'),
self.b,name=name)
class Linear(object) :
def __init__(self,name,input_dim,output_dim,stddev=0.02) :
with tf.variable_scope(name) :
self.w = tf.get_variable('w',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(0.0))
def __call__(self,input_var,name=None,w=None,b=None,**kwargs) :
w = w if w is not None else self.w
b = b if b is not None else self.b
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
return tf.matmul(tf.reshape(input_var,[-1,dims]),w) + b
else :
return tf.matmul(input_var,w)+b
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormLinear(object):
def __init__(self,name,input_dim,output_dim,stddev=0.02,epsilon=1e-10) :
with tf.variable_scope(name) :
self.v = tf.get_variable('v',[input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[output_dim],
initializer=tf.constant_initializer(float('nan')))
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
if( input_var.shape.ndims > 2 ) :
dims = tf.reduce_prod(tf.shape(input_var)[1:])
input_var = tf.reshape(input_var,[-1,dims])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=0)
t = tf.matmul(input_var,v_norm)
mu,var = tf.nn.moments(t,axes=[0])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.expand_dims(self.g,axis=0) * tf.nn.l2_normalize(self.v,axis=0)
return tf.matmul(input_var,w)+self.b
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class SymPadConv2d(object): # Resize and Convolution (upscale by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, input_dim, output_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[output_dim], initializer=tf.constant_initializer(0.0))
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return tf.nn.bias_add(
tf.nn.conv2d(_t, self.w,
data_format='NHWC', #we can't use cudnn due to resize method...
strides=[1,1,1,1], padding="VALID"),
self.b,data_format='NHWC',name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormSymPadConv2d(object): # Resize and Convolution (upscale by 2)
def __init__(self,name,input_dim,output_dim,
k_h=3,k_w=3,stddev=0.02) :
assert k_h%2==1 and k_w%2==1, 'kernel size should be odd numbers to ensure exact size'
with tf.variable_scope(name) :
self.conv2d = WeightNormConv2d('conv',input_dim,output_dim,k_h,k_w,1,1,data_format='NHWC',padding='VALID')
self.padding = [ [0,0],[k_h//2,k_h//2],[k_w//2,k_w//2],[0,0] ]
def __call__(self,input_var,name=None,**kwargs):
_,h,w,c = input_var.shape.as_list()
_t = tf.image.resize_nearest_neighbor(input_var, [h*2, w*2])
_t = tf.pad(_t,self.padding, mode='SYMMETRIC')
return self.conv2d(_t)
def get_variables(self):
return self.conv2d.get_variables()
class TransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NCHW') :
with tf.variable_scope(name) :
self.w = tf.get_variable('w', [k_h, k_w, out_dim, input_dim],
initializer=tf.random_normal_initializer(stddev=stddev))
self.b = tf.get_variable('b',[out_dim], initializer=tf.constant_initializer(0.0))
self.data_format = data_format
if( data_format =='NCHW' ):
self.strides = [1, 1, d_h, d_w]
else:
self.strides = [1, d_h, d_w, 1]
def __call__(self,input_var,name=None,**xargs):
shapes = tf.shape(input_var)
if( self.data_format == 'NCHW' ):
shapes = tf.stack([shapes[0],tf.shape(self.b)[0],shapes[2]*self.strides[2],shapes[3]*self.strides[3]])
else:
shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,self.w,output_shape=shapes,
data_format=self.data_format,
strides=self.strides,padding='SAME'),
self.b,data_format=self.data_format,name=name)
def get_variables(self):
return {'w':self.w,'b':self.b}
class WeightNormTransposedConv2d(object):
def __init__(self,name,input_dim,out_dim,
k_h=4,k_w=4,d_h=2,d_w=2,stddev=0.02,data_format='NHWC',epsilon=1e-9) :
with tf.variable_scope(name) :
assert data_format == 'NHWC'
self.v = tf.get_variable('v', [k_h, k_w, out_dim, input_dim],
initializer=tf.truncated_normal_initializer(stddev=stddev))
self.g = tf.get_variable('g',[out_dim],
initializer=tf.constant_initializer(float('nan')))
self.b = tf.get_variable('b',[out_dim],
initializer=tf.constant_initializer(float('nan')))
self.strides = [1, d_h, d_w, 1]
self.epsilon = epsilon
def __call__(self,input_var,name=None,**kwargs) :
shapes = tf.shape(input_var)
shapes = tf.stack([shapes[0],shapes[1]*self.strides[1],shapes[2]*self.strides[2],tf.shape(self.b)[0]])
def _init():
v_norm = tf.nn.l2_normalize(self.v,axis=[0,1,3])
t = tf.nn.conv2d_transpose(input_var,v_norm,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC')
mu,var = tf.nn.moments(t,axes=[0,1,2])
std = tf.sqrt(var+self.epsilon)
return [tf.assign(self.g,1/std),tf.assign(self.b,-1.*mu/std)]
require_init = tf.reduce_any(tf.is_nan(self.g))
init_ops = tf.cond(require_init,_init,lambda : [self.g,self.b])
with tf.control_dependencies(init_ops):
w = tf.reshape(self.g,[1,1,tf.shape(self.v)[2],1]) * tf.nn.l2_normalize(self.v,axis=[0,1,3])
return tf.nn.bias_add(
tf.nn.conv2d_transpose(input_var,w,
output_shape=shapes,
strides=self.strides,
padding='SAME',
data_format='NHWC'),
self.b,data_format='NHWC',name=name)
def get_variables(self):
#TODO: self.v should be l2-normalized or not? / currently not.
return {'v':self.v,'b':self.b,'g':self.g}
class LayerNorm():
def __init__(self,name,axis,out_dim=None,epsilon=1e-7,data_format='NHWC') :
"""
out_dim: Recentering by adding bias again.
The previous bias can be ignored while normalization.
(when you normalize over channel only)
"""
assert data_format=='NCHW' or data_format=='NHWC'
assert len(axis) != 1 or (len(axis) == 1 and out_dim != None)
"""
TODO: Track Moving mean and variance, and use this statistics.
with tf.variable_scope(name):
self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
"""
if out_dim is not None:
with tf.variable_scope(name) :
self.gamma= tf.get_variable('gamma',[1,1,1,out_dim], initializer=tf.constant_initializer(1.0))
self.beta = tf.get_variable('beta',[out_dim], initializer=tf.constant_initializer(0.0))
else:
self.gamma = None
self.beta = None
self.axis = axis
self.epsilon = epsilon
self.data_format = data_format
self.name = name
def __call__(self,input_var,**kwargs) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
ret = (input_var - mean) / tf.sqrt(var+self.epsilon)
if self.gamma is None :
return ret
else:
return tf.nn.bias_add(ret*self.gamma,
self.beta,data_format=self.data_format)
def get_variables(self):
return {'gamma':self.gamma,'beta':self.beta} if self.gamma is not None else {}
class InstanceNorm():
def __init__(self,name,format='NCHW',epsilon=1e-5) :
assert(format=='NCHW' or format=='NHWC')
self.axis = [2,3] if format == 'NCHW' else [1,2]
self.epsilon = epsilon
self.name = name
def __call__(self,input_var) :
mean, var = tf.nn.moments(input_var, self.axis, keep_dims=True)
return (input_var - mean) / tf.sqrt(var+self.epsilon)
class BatchNorm(object):
def __init__(self,name,dims,axis=1,epsilon=1e-3,momentum=0.999,center=True,scale=True) :
self.momentum = momentum
self.epsilon = epsilon
self.axis = axis
self.center=center
self.scale=scale
with tf.variable_scope(name) as scope:
with tf.variable_scope('bn') :
self.gamma= tf.get_variable('gamma',[dims], initializer=tf.constant_initializer(1.0))
self.beta = tf.get_variable('beta',[dims], initializer=tf.constant_initializer(0.0))
self.moving_mean = tf.get_variable('moving_mean',[dims], initializer=tf.constant_initializer(0.0), trainable=False)
self.moving_variance = tf.get_variable('moving_variance',[dims], initializer=tf.constant_initializer(1.0), trainable=False)
self.scope = scope
def __call__(self,input_var,is_training,**xargs) :
with tf.variable_scope(self.scope) :
return tf.layers.batch_normalization(
input_var,
axis=self.axis,
momentum=self.momentum,
epsilon=self.epsilon,
center=self.center,
scale=self.scale,
training=is_training,
reuse=True,
name='bn')
"""
---Do NOT forget to add update_ops dependencies for your loss function.---
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS,tf.get_default_graph().get_name_scope())
#And, do not make any scope inside map_fn, since scope.name will not work...(it is corrupted by map_fn.)
print(update_ops)
with tf.control_dependencies(update_ops):
"""
def get_variables(self):
return {}
class Lrelu(object):
def __init__(self,leak=0.2,name='lrelu') :
self.leak = leak
self.name = name
def __call__(self, x, **kwargs) :
return tf.maximum(x, self.leak*x, name=self.name)
def get_variables(self):
return {}
class ResidualBlock() :
def __init__(self,name,filters,filter_size=3,non_linearity=Lrelu,normal_method=InstanceNorm) :
self.conv_1 = Conv2d(name+'_1',filters,filters,filter_size,filter_size,1,1)
self.normal = normal_method(name+'_norm')
self.nl = non_linearity()
self.conv_2 = Conv2d(name+'_2',filters,filters,filter_size,filter_size,1,1)
def __call__(self,input_var) :
_t = self.conv_1(input_var)
_t = self.normal(_t)
_t = self.nl(_t)
_t = self.conv_2(_t)
return input_var + _t
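# --- Editor's sketch (not part of the original file): one way the layers above can be chained,
# --- assuming an NHWC float input; the names and shapes below are illustrative only.
# x = tf.placeholder(tf.float32, [None, 64, 64, 3])
# h = Conv2d('conv1', 3, 32, data_format='NHWC')(x)   # stride 2 -> [None, 32, 32, 32]
# h = Lrelu()(h)
# logits = Linear('fc1', 32*32*32, 10)(h)             # Linear flattens inputs with ndims > 2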
| [
"[email protected]"
] | |
0c61ce225d80072549a004ed2591a718c5672896 | 7bededcada9271d92f34da6dae7088f3faf61c02 | /pypureclient/flashblade/FB_2_8/models/target_get_response.py | ac033a0a864676d3ec597b61877bb7714e0e01c8 | [
"BSD-2-Clause"
] | permissive | PureStorage-OpenConnect/py-pure-client | a5348c6a153f8c809d6e3cf734d95d6946c5f659 | 7e3c3ec1d639fb004627e94d3d63a6fdc141ae1e | refs/heads/master | 2023-09-04T10:59:03.009972 | 2023-08-25T07:40:41 | 2023-08-25T07:40:41 | 160,391,444 | 18 | 29 | BSD-2-Clause | 2023-09-08T09:08:30 | 2018-12-04T17:02:51 | Python | UTF-8 | Python | false | false | 4,220 | py | # coding: utf-8
"""
FlashBlade REST API
A lightweight client for FlashBlade REST API 2.8, developed by Pure Storage, Inc. (http://www.purestorage.com/).
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flashblade.FB_2_8 import models
class TargetGetResponse(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'continuation_token': 'str',
'total_item_count': 'int',
'items': 'list[Target]'
}
attribute_map = {
'continuation_token': 'continuation_token',
'total_item_count': 'total_item_count',
'items': 'items'
}
required_args = {
}
def __init__(
self,
continuation_token=None, # type: str
total_item_count=None, # type: int
items=None, # type: List[models.Target]
):
"""
Keyword args:
continuation_token (str): Continuation token that can be provided in the `continuation_token` query param to get the next page of data. If you use the `continuation_token` to page through data you are guaranteed to get all items exactly once regardless of how items are modified. If an item is added or deleted during the pagination then it may or may not be returned. The `continuation_token` is generated if the `limit` is less than the remaining number of items, and the default sort is used (no sort is specified).
total_item_count (int): Total number of items after applying `filter` params.
items (list[Target]): A list of target objects.
"""
if continuation_token is not None:
self.continuation_token = continuation_token
if total_item_count is not None:
self.total_item_count = total_item_count
if items is not None:
self.items = items
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `TargetGetResponse`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
return None
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(TargetGetResponse, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, TargetGetResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"[email protected]"
] | |
6cc605df33d06fc2f8575460a8beca5e972c9fe6 | f65b633d0760e20ef5e0066be10aa18168f5659e | /documents/views.py | 448b7e5a09e37e05fa8e98adc18b108ace1694e2 | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | boxed/curia | a2b5ce5feda114bac1637de112b20026dabee5ae | c19f12f77b570b180acf4ec1ee05ea77b87b5fc9 | refs/heads/master | 2021-03-12T22:53:36.748160 | 2014-10-20T16:56:45 | 2014-10-20T16:56:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,834 | py | import re
from datetime import datetime
from django.contrib.auth.views import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.utils.simplejson import dumps
from django.contrib.auth.models import User,Group
from django.utils.encoding import smart_unicode
from curia.documents.models import Document, Version
from curia.shortcuts import *
from curia import *
from django.utils.translation import ugettext as _
from sets import Set
from curia.labels import get_labels
from curia.labels.models import Label
from curia.labels import handle_labels, mark_labels_as_deleted
# helper
def get_latest(document_id):
try: return Version.objects.filter(document=document_id).order_by('-id')[0]
except Version.DoesNotExist: return None;
def validate_wiki_links(owner_user, owner_group, form, contents_name = 'contents'):
from django.utils.encoding import smart_unicode
contents = smart_unicode(form.data[contents_name])
links = list(re.finditer(r'(\[(.*?)\])', contents))
errors = []
link_targets = {}
# examples of use:
# [images/groups/1/sets/3]
for link in links:
title = link.groups()[1]
if ';' in title:
group_name, title = title.split(u';')
group = get_objects_from(Group, name=group_name)
if len(group) == 1:
owner_group = group[0]
else:
user = get_objects_from(User, username=group_name)
if len(user) == 1:
owner_user = user[0]
else:
errors.append(_('%s is not a valid group or user name') % group_name)
continue
documents = get_objects_from(Document, owner_user=owner_user, owner_group=owner_group, title=title, deleted=False)
if len(documents) != 1:
errors.append(_('Could not find document %s') % link.groups()[1])
else:
link_targets[link.groups()[1]] = documents[0]
if len(errors) != 0:
form.errors[contents_name] = errors
else:
# replace from the end as to not change the string in a way that interferes with the following replace operation
links.reverse()
for link in links:
target = link_targets[link.groups()[1]]
contents = contents.replace(link.groups()[0], '<a href="'+target.get_absolute_url()+'">'+smart_unicode(target)+'</a>')
return contents
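# Editor's note (not part of the original code): given the syntax handled above, a document body
# such as "see [Meeting notes] or [otherteam;Road map]" would have each [..] replaced by an
# <a href="..."> link to the matching Document, or the form collects an error for every title
# that could not be resolved. The titles here are purely illustrative.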
# views
def version_response(request, v):
return render_to_response(request, 'documents/version.html', {'version': v, 'document': v.document, 'owner':get_owner(v.document)})
def view_latest(request, document_id):
v = get_latest(document_id)
check_access(request.user, obj=v.document, command='view')
if v == None:
raise Http404
return version_response(request, v)
def view_version(request, version_id, document_id):
v = get_object_or_404_and_check_access(request, Version, pk=version_id, command='view')
check_access(request.user, obj=v.document, command='view')
#if v.document.id != document_id:
# raise something
return version_response(request, v)
def view_version_list(request, document_id):
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='view')
return render_to_response(request, 'documents/version_list.html', {'version_list': Version.objects.filter(document=document_id), 'document': Document.objects.get(pk=document_id)})
def add_document(request):
is_presentation = get_boolean(request,'is_presentation')
owner_group = None
owner_user = None
class DocumentForm(django.forms.Form):
title = django.forms.CharField(max_length=1024, label=_('Title'))
#labels = django.forms.CharField(required=False, label=_('Labels'))
contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))
group_id = get_integer(request,'group_id')
user_id = get_integer(request,'user_id')
if group_id:
owner_group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='add')
check_access(request.user, obj=owner_group, command='add document')
else:
owner_user = get_object_or_404_and_check_access(request, User, pk=user_id, command='add')
check_access(request.user, obj=owner_user, command='add document')
if request.POST:
form = DocumentForm(request.POST)
if form.is_valid():
#Handle the document
if owner_group != None:
document = Document.objects.create(owner_group=owner_group, owner_user=owner_user, is_presentation=is_presentation)
else:
document = Document.objects.create(owner_user=owner_user, is_presentation=is_presentation)
if document.is_presentation:
                if not group_id:
title = owner_user.username + 's Presentation'
else:
                    owner_group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='add')
title = owner_group.name + 's Presentation'
else:
title = form.cleaned_data['title']
new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
new_version.save()
#Handle the labels
#handle_labels(request,document)
if document.is_presentation:
if document.owner_group:
return HttpResponseRedirect(document.owner_group.get_absolute_url())
else:
return HttpResponseRedirect(document.owner_user.get_absolute_url())
return HttpResponseRedirect(document.get_absolute_url())
else:
form = DocumentForm()
return render_to_response(request, 'documents/add.html', {'form':form})
def edit_document(request, document_id, is_creating=False):
group_id = get_integer(request, 'group_id')
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='edit')
user = request.user
class DocumentForm(django.forms.Form):
if not document.is_presentation:
title = django.forms.CharField(max_length=1024, label=_('Title'))
#labels = django.forms.CharField(required=False, label=_('Labels'))
contents = django.forms.CharField(required=False, widget = django.forms.Textarea, label=_('Contents'))
edit_version = django.forms.IntegerField(widget = django.forms.HiddenInput, required=True)
if request.POST:
form = DocumentForm(request.POST)
if int(request.POST['edit_version']) != document.get_latest_version().id:
post = request.POST.copy()
post['edit_version'] = document.get_latest_version().id
form = DocumentForm(post)
form.errors['contents'] = [_('Document was changed after you began editing it, please review the changes and then press save again')]
if form.is_valid():
#Handle the labels
#handle_labels(request,document)
#Handle the document
if not document.is_presentation:
if form.cleaned_data.has_key('title'):
title = form.cleaned_data['title']
else:
title = document.get_latest_version().title
else:
if user.first_name.endswith('s'):
title=user.first_name+' presentation'
else:
title = user.first_name+'s presentation'
new_version = Version(document=document,title=title, contents=strip_p(form.cleaned_data['contents']), owner=request.user)
new_version.save()
if request.external:
from curia.homepage.models import MenuItem
try:
menu = MenuItem.objects.get(content_type=get_content_type(document), object_id=document.id)
menu.title = title
menu.save()
except MenuItem.DoesNotExist:
pass
if document.is_presentation:
if document.owner_group:
return HttpResponseRedirect(document.owner_group.get_absolute_url())
else:
return HttpResponseRedirect(document.owner_user.get_absolute_url())
return HttpResponseRedirect(document.get_absolute_url())
else:
latest_version = document.get_latest_version()
form = DocumentForm(initial={'title': latest_version.title, 'contents': latest_version.contents, 'edit_version':latest_version.id})
return render_to_response(request, 'documents/edit.html', {'form':form, 'document':document})
def delete_document(request, document_id):
document = get_object_or_404_and_check_access(request, Document, pk=document_id, command='delete')
from curia import delete_objects
delete_objects(document)
if request.external:
from curia.homepage.models import MenuItem
try:
menu = MenuItem.objects.get(content_type=get_content_type(document), object_id=document.id)
menu.delete()
except MenuItem.DoesNotExist:
pass
return HttpResponse(dumps(document_id, ensure_ascii=False), content_type="text/json; charset=UTF-8")
def view_documents_of_user(request, user_id):
user = get_object_or_404_and_check_access(request, User, pk=user_id, command='view')
objects = get_objects_from(Document, deleted=False, owner_user=user, owner_group=None, is_presentation=False)
return render_to_response(request, 'documents/document_list.html', {'owner':user, 'objects':objects, 'type':'users'})
def view_documents_of_group(request, group_id=None):
if group_id != None:
group = get_object_or_404_and_check_access(request, Group, pk=group_id, command='view')
else:
group = get_current_community()
objects = get_objects_from(Document, deleted=False, owner_group=group, is_presentation=False)
return render_to_response(request, 'documents/document_list.html', {'owner':group, 'objects':objects, 'type':'groups'})
def revert_to_version(request, document_id, version_id):
old_version = Version.objects.get(pk = version_id)
document = Document.objects.get(pk = document_id)
new_version = Version(document=document,title=old_version.title, contents=old_version.contents, owner=request.user)
new_version.save()
return version_response(request, new_version)
| [
"[email protected]"
] | |
ab7a78b9db6f60371ee1fac74f8b8411ff23aa43 | a179d2abea58ee4d987bf05729a5e7df727af3cd | /instaclone/settings.py | 6e13a5e35aa00ba74ca16a19dd70fe50c0cb34ee | [
"MIT"
] | permissive | Derrick-Nyongesa/instagram-clone | ced05a4c334c9e95e96bec9a3883b448c5fa95c6 | 2f3c018c33aa440160401f0c1878a2670f2f0081 | refs/heads/main | 2023-05-14T01:32:36.211904 | 2021-05-26T13:42:26 | 2021-05-26T13:42:26 | 369,403,582 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,796 | py | """
Django settings for instaclone project.
Generated by 'django-admin startproject' using Django 3.2.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
import os
from decouple import config, Csv
import cloudinary
import cloudinary.uploader
import cloudinary.api
import django_heroku
import dj_database_url
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'instagram',
'bootstrap3',
'cloudinary'
]
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'instaclone.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
],
},
},
]
WSGI_APPLICATION = 'instaclone.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
if config('MODE')=="dev":
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_NAME'),
'USER': config('DB_USER'),
'PASSWORD': config('DB_PASSWORD'),
'HOST': config('DB_HOST'),
'PORT': '',
}
}
# production
else:
DATABASES = {
'default': dj_database_url.config(
default=config('DATABASE_URL')
)
}
db_from_env = dj_database_url.config(conn_max_age=500)
DATABASES['default'].update(db_from_env)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Africa/Nairobi'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, "static"),
]
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
LOGIN_REDIRECT_URL = '/'
LOGOUT_REDIRECT_URL = '/accounts/login/'
#AUTH_PROFILE_MODULE = 'accounts.Profile'
EMAIL_USE_TLS = config('EMAIL_USE_TLS')
EMAIL_HOST = config('EMAIL_HOST')
EMAIL_PORT = config('EMAIL_PORT')
EMAIL_HOST_USER = config('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD')
cloudinary.config(
cloud_name = config("CLOUDINARY_NAME"),
api_key = config("CLOUDINARY_KEY"),
api_secret = config("CLOUDINARY_SECRET")
)
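# --- Editor's sketch (not part of the original settings): the config() lookups in this module
# --- expect an .env file (or environment variables) along these lines; all values are placeholders.
# SECRET_KEY=change-me
# DEBUG=True
# MODE=dev
# DB_NAME=instaclone
# DB_USER=postgres
# DB_PASSWORD=postgres
# DB_HOST=127.0.0.1
# DATABASE_URL=postgres://user:pass@host:5432/dbname
# ALLOWED_HOSTS=localhost,127.0.0.1
# EMAIL_USE_TLS=True
# EMAIL_HOST=smtp.example.com
# EMAIL_PORT=587
# EMAIL_HOST_USER=user@example.com
# EMAIL_HOST_PASSWORD=app-password
# CLOUDINARY_NAME=your-cloud
# CLOUDINARY_KEY=your-key
# CLOUDINARY_SECRET=your-secret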
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
django_heroku.settings(locals())
| [
"[email protected]"
] | |
b91ee62ab15974dcb724ceeb00d00689410e332f | 7bf1dc58ba0884ed957efdb5459ae44851b2b36e | /practice_450/greedy/33_rearrange_characters.py | 858f75ba242070202848f0f4f1146c91f0ceea28 | [] | no_license | ksaubhri12/ds_algo | 672260f07f41bcfc33f8ac23a64085a1f27ab4a5 | 46505b89371cae3321f48609dd755c7e5cfed302 | refs/heads/master | 2023-05-12T08:37:06.789111 | 2023-05-03T03:06:49 | 2023-05-03T03:06:49 | 211,793,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 935 | py | def rearrange_characters(input_string: str):
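    # Editor's note: greedy strategy -- count every character, then place characters (most
    # frequent first) on the even indices 0, 2, 4, ... and wrap to the odd indices once the even
    # ones are used up; if any character occurs more than (n + 1) // 2 times, no arrangement
    # without equal adjacent characters exists and -1 is returned.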
n = len(input_string)
dict_value = {}
for i in range(0, n):
if input_string[i] in dict_value:
dict_value[input_string[i]] = dict_value[input_string[i]] + 1
else:
dict_value[input_string[i]] = 1
sorted_count_list = sorted(dict_value, key=dict_value.get, reverse=True)
i = 0
start = 0
char_list = list(input_string)
while len(sorted_count_list) > 0:
char = sorted_count_list.pop(0)
count = dict_value[char]
        if count > (n + 1) // 2:
            return -1
start = start + 1
for k in range(0, count):
char_list[i] = char
i = i + 2
if i >= n:
i = 1
return ''.join(char_list)
if __name__ == '__main__':
print(rearrange_characters('geeksforgeeks'))
print(rearrange_characters('bbbbb'))
print(rearrange_characters('kkk'))
| [
"[email protected]"
] | |
6d11a2a08e99746fcf09d5f7a1e8b2a1c35a11e3 | 9716316eb0c5b5a1487866d37b58efc116511d22 | /charmdet/runReconstruction.py | 7e60609548ccbac61bb0a6d7f587dec8d911689f | [] | no_license | klleung/FairShip | 68245fcd042f47a5ed2feeaad1c2e84e5aa21241 | a7e67ac58387f651722068e8325513b5e0d6832a | refs/heads/master | 2020-06-06T23:02:00.991756 | 2019-07-19T12:23:35 | 2019-07-19T12:23:35 | 192,870,986 | 2 | 0 | null | 2019-06-20T07:26:44 | 2019-06-20T07:26:44 | null | UTF-8 | Python | false | false | 15,856 | py |
import os,subprocess,ROOT,time,multiprocessing
from rootpyPickler import Unpickler
from rootpyPickler import Pickler
import pwd
ncpus = int(multiprocessing.cpu_count()*3./4.)
pathToMacro = '$FAIRSHIP/charmdet/'
def count_python_processes(macroName):
username = pwd.getpwuid(os.getuid()).pw_name
callstring = "ps -f -u " + username
# only works if screen is wide enough to print full name!
status = subprocess.check_output(callstring,shell=True)
n=0
for x in status.split('\n'):
if not x.find(macroName)<0 and not x.find('python') <0: n+=1
return n
fileList = {}
badFiles = []
run = "RUN_8000_2395" # "RUN_8000_2396"
eospath='/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+run
def getFilesFromEOS():
# list of files
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospath,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.find('/eos'):]
nentries = 0
try:
f=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
tmp = {}
for fname in fileList:
newName = fname[fname.rfind('/')+1:]
rc = os.system("xrdcp -f $EOSSHIP"+fname+" "+newName)
tmp[newName]=fileList[fname]
fnames = tmp.keys()
fnames.sort()
return tmp,fnames
def getFilesLocal():
# list of files
for fname in os.listdir('.'):
if fname.find('.root')<0: continue
if not fname.find('_RT')<0: continue
test = fname.replace('.root','_RT.root')
if os.path.isfile(test): continue
nentries = 0
try:
f=ROOT.TFile.Open(fname)
nentries=f.cbmsim.GetEntries()
fileList[fname]=nentries
except:
print "problem accessing file",fname
badFiles.append(fname)
Nfiles = len(fileList)
fnames = fileList.keys()
fnames.sort()
return fileList,fnames
def recoStep0(local=False):
if local: tmp,fnames = getFilesLocal()
else: tmp,fnames = getFilesFromEOS()
Nfiles = len(fnames)
print "fileList established ",Nfiles
Ndone = 0
while Ndone < Nfiles:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep0 -f "
# group files to get better stats
Ntot = 0
sample = []
i = 0
for k in range(Ndone,Nfiles):
Ntot += tmp[fnames[k]]
sample.append(fnames[k])
i+=1
if Ntot>350000: break
Ndone += i
# check that enough files remain
Nextsample = []
Ntot = 0
for k in range(Ndone,Nfiles):
Ntot += tmp[fnames[k]]
Nextsample.append(fnames[k])
if Ntot>350000: break
if Ntot < 350000:
for s in Nextsample: sample.append(s)
Ndone += len(Nextsample)
if len(sample)==0: break
for s in sample: cmd+=s+','
print 'step 0:',cmd[:cmd.rfind(',')],Ndone,Nfiles
os.system(cmd[:cmd.rfind(',')]+" &")
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(200)
if Ndone%100==0: cleanUp()
while count_python_processes('drifttubeMonitoring')>0: time.sleep(200)
print "files created with RT relations "
cleanUp()
def checkFilesWithRT():
fok = []
fNotok = []
fRaw = []
for fname in os.listdir('.'):
if not fname.find('histo')<0: continue
if not fname.find('_RT')<0:
f=ROOT.TFile(fname)
RT = f.Get('tMinAndTmax')
if RT:
fok.append(fname)
else:
fNotok.append(fname)
elif fname.find('root')>0 and not fname.find('SPILL')<0:
fRaw.append(fname)
print len(fok),len(fNotok),len(fRaw)
return fok,fNotok,fRaw
def checkMinusTwo():
fok,fNotok,fRaw = checkFilesWithRT()
for fname in fRaw:
if fname in fok: continue
N=0
f=ROOT.TFile(fname)
sTree = f.cbmsim
for n in range(sTree.GetEntries()):
rc = sTree.GetEvent(n)
for m in sTree.Digi_MufluxSpectrometerHits:
if m.GetDetectorID()<0: N+=1
print sTree.GetCurrentFile(),N
def recoStep1():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(x)
if test.cbmsim.GetBranch("FitTracks"): continue
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fname+' &'
print 'step 1:', cmd
os.system(cmd)
time.sleep(100)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def checkAlignment(fileList=[]):
# all RT files
if len(fileList)==0:
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos-residuals')<0:
fileList.append(x)
fileList.sort()
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c alignment -f "+fname+' &'
print 'make residual plots:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(100)
print "finished all the tasks."
def runMC():
# fast MC
inputFile = "/eos/experiment/ship/data/Mbias/background-prod-2018/pythia8_Geant4_10.0_withCharmandBeauty0_mu.root" # entries 13450391L
os.system("python $FAIRSHIP/macro/run_simScript.py -n 100000 --MuonBack --charm=1 --CharmdetSetup=0 -f "+inputFile)
# full simulation
    os.system("python $SHIPBUILD/FairShip/macro/run_simScript.py --Muflux -n 1000 --charm=1 --CharmdetSetup=0")
def checkFilesWithTracks(D='.'):
fileList=[]
rest=[]
zombie=[]
# all RT files
if D.find('eos')<0:
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
if not test.GetKey('cbmsim'):
zombie.append(x)
elif test.cbmsim.GetBranch("FitTracks"): fileList.append(x)
else: rest.append(x)
else:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+D,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
fname = x[x.find('/eos'):]
try:
test=ROOT.TFile.Open(os.environ['EOSSHIP']+fname)
if not test.GetKey('cbmsim'):
zombie.append(fname)
elif test.cbmsim.GetBranch("FitTracks"): fileList.append(fname)
else: rest.append(fname)
except:zombie.append(fname)
fileList.sort()
print "n with tracks",len(fileList),' rest:',len(rest),' zombies:',zombie
return fileList
def checkFilesWithTracks2(D='.'):
badFile=[]
# all RT files
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
sTree = test.cbmsim
if not sTree: badFile.append(x+"?")
elif sTree.GetBranch("FitTracks"):
prev = 0
for n in range(min(20000,sTree.GetEntries())):
rc = sTree.GetEvent(n)
if sTree.FitTracks.GetEntries()>0:
st = sTree.FitTracks[0].getFitStatus()
if not st.isFitConverged(): continue
if prev==st.getChi2():
badFile.append(x)
break
else: prev=st.getChi2()
return badFile
def checkFilesWithTracks3(D='.'):
badFile={}
# all RT files
for x in os.listdir(D):
if x.find('_RT')>0 and x.find('histos')<0:
test = ROOT.TFile(D+'/'+x)
sTree = test.cbmsim
if not sTree:
badFile.append(x+"?")
continue
b = sTree.GetBranch("FitTracks")
if b:
if b.GetZipBytes()/1.E6 < 1.: badFile[x]= b.GetZipBytes()/1.E6
return badFile
# for f in bf: os.system('cp ../../ship-ubuntu-1710-64/RUN_8000_2395/'+f+' .')
def cleanUp(D='.'):
# remove raw data files for files with RT relations
fok,fNotok,fRaw = checkFilesWithRT()
for x in fok:
r = x.replace('_RT','')
cmd = 'rm '+r
os.system(cmd)
def copyMissingFiles(remote="../../ship-ubuntu-1710-64/RUN_8000_2395",exclude=[]):
toCopy=[]
allFilesR = os.listdir(remote)
allFilesL = os.listdir(".")
for fname in allFilesR:
if not fname.find('histos')<0: continue
if fname.find('RT')<0: continue
if fname in exclude: continue
if not fname in allFilesL: toCopy.append(fname)
print "len",len(toCopy)
for fname in toCopy: os.system('cp '+remote+"/"+fname+' .')
def importRTFiles(local='.',remote='/home/truf/ship-ubuntu-1710-32/home/truf/muflux/Jan08'):
# mkdir /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
# sshfs ship-ubuntu-1710-32.cern.ch:/home/truf/muflux /media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-32
fileWithTracks = checkFilesWithTracks(local)
allFiles = os.listdir(remote)
for x in allFiles:
if x.find('_RT')>0 and x.find('histos')<0 and not x in fileWithTracks:
os.system('cp '+remote+'/'+x+' .')
def importRecoFiles(local='.',remote='/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-16/home/truf/muflux/Jan08'):
fileWithTracks = checkFilesWithTracks(remote)
for x in fileWithTracks: os.system('cp '+remote+'/'+x+' .')
def mergeHistos(local='.',case='residuals'):
allFiles = os.listdir(local)
if case == 'residuals':
dest = 'residuals.root'
tag = 'histos-residuals'
else:
dest = 'momDistributions.root'
tag = 'histos-analysis'
cmd = "hadd -f "+dest+' '
N=0
for x in allFiles:
if not x.find(tag)<0 :
cmd += (local+'/'+x+' ')
N+=1
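      # once ~500 inputs are queued, hadd is run and then restarted from the partial
      # result (tmp.root), presumably to keep the command line short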
if N>500:
os.system(cmd)
os.system('cp '+dest+' tmp.root')
cmd = "hadd -f "+dest+' tmp.root '
N=0
os.system(cmd)
def checkRecoRun(eosLocation=eospath,local='.'):
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eosLocation,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
if not x.find('START')<0: continue
fname = x[x.rfind('/')+1:]
RTname = fname.replace('.root','_RT.root')
histosName = "histos-residuals-"+RTname
if not os.path.isfile(RTname):
print "missing RT file",fname
if not os.path.isfile(histosName):
print "missing histogram file",fname
def exportRunToEos(eosLocation="/eos/experiment/ship/user/truf/muflux-reco",run=run,local="."):
temp = os.system("xrdfs "+os.environ['EOSSHIP']+" mkdir "+eosLocation+"/"+run)
failures = []
for x in os.listdir(local):
if x.find('.root')<0: continue
cmd = "xrdcp -f "+x+" $EOSSHIP/"+eosLocation+"/"+run+"/"+x
rc = os.system(cmd)
if rc != 0: failures.append(x)
if len(failures)!=0: print failures
def makeMomDistributions(run=0):
if run==0: fileList = checkFilesWithTracks(D='.')
else:
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'+run
fileList = []
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
for x in temp.split('\n'):
if x.find('.root')<0: continue
fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
# all RT files with tracks
for fname in fileList:
if not fname.find('sys')<0: continue
if os.path.isfile('histos-analysis-'+fname[fname.rfind('/')+1:]): continue
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c anaResiduals -f "+fname+' &'
print 'momentum analysis:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
zeroField = ['2199','2200','2201']
noRPC = ['2144','2154','2192','2210','2217','2218','2235','2236','2237','2240','2241','2243','2291','2345','2359']
def massProduction(keyword = 'RUN_8000_23',fnames=[],merge=False):
pathToMacro = "$FAIRSHIP/charmdet/"
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
if merge:
for run in os.listdir('.'):
if run.find(keyword)<0: continue
os.chdir(run)
mergeHistos(local='.',case='momDistributions')
os.chdir('../')
else:
if len(fnames)==0:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
fnames = temp.split('\n')
for x in fnames:
if x.find(keyword)<0: continue
run = x[x.rfind('/')+1:]
if not run in os.listdir('.'): os.system('mkdir '+run)
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
if temp2.find('.root')<0: continue
skip = False
for x in zeroField:
if not run.find(x)<0: skip = True
if skip: continue
os.chdir(run)
makeMomDistributions(run)
os.chdir('../')
def massProductionAlignment(keyword = 'RUN_8000_2395',fnames=[],merge=False):
pathToMacro = "$FAIRSHIP/charmdet/"
eospathReco = '/eos/experiment/ship/user/odurhan/muflux-recodata/'
if merge:
for run in os.listdir('.'):
if run.find(keyword)<0: continue
os.chdir(run)
mergeHistos(local='.')
os.chdir('../')
else:
if len(fnames)==0:
temp = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco,shell=True)
fnames = temp.split('\n')
for x in fnames:
if x.find(keyword)<0: continue
run = x[x.rfind('/')+1:]
if not run in os.listdir('.'):
print "directory for this run does not exist",run
# os.system('mkdir '+run)
continue
temp2 = subprocess.check_output("xrdfs "+os.environ['EOSSHIP']+" ls -l "+eospathReco+run,shell=True)
if temp2.find('.root')<0: continue
os.chdir(run)
fileList = []
for x in temp2.split('\n'):
if x.find('.root')<0: continue
fileList.append( os.environ['EOSSHIP'] + x[x.find('/eos'):])
checkAlignment(fileList)
os.chdir('../')
def redoMuonTracks():
fileList = checkFilesWithTracks(D='.')
for fname in fileList:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoMuonTaggerTracks -u 1 -f "+fname+' &'
print 'redo muonTracks:', cmd
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
print "finished all the tasks."
def reRunReco(r,fname):
fRT = fname.replace('.root','_RT2.root')
os.system('xrdcp -f $EOSSHIP/eos/experiment/ship/data/muflux/DATA_Rebuild_8000/rootdata/'+r+'/'+fname+' '+fRT)
f = ROOT.TFile.Open(os.environ['EOSSHIP']+'/eos/experiment/ship/user/odurhan/muflux-recodata/'+r+'/'+fname.replace('.root','_RT.root'))
ftemp = ROOT.TFile(fRT,'update')
ftemp.cd('')
upkl = Unpickler(f)
tMinAndTmax = upkl.load('tMinAndTmax')
pkl = Pickler(ftemp)
pkl.dump(tMinAndTmax,'tMinAndTmax')
ftemp.mkdir('histos')
ftemp.histos.cd('')
for tc in ['TDCMapsX','hitMapsX']:
tmp = f.histos.Get(tc)
X = tmp.Clone()
X.Write()
ftemp.Write("",ROOT.TFile.kOverwrite)
ftemp.Close()
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c recoStep1 -u 1 -f "+fRT+' &'
os.system(cmd)
print 'step 1:', cmd
def pot():
fileList=[]
# all RT files
for x in os.listdir('.'):
if x.find('_RT')>0 and x.find('histos')<0:
fileList.append(x)
fileList.sort()
scalerStat = {}
for fname in fileList:
f=ROOT.TFile(fname)
if not f.FindKey("scalers"):
print "no scalers in this file",fname
continue
scalers = f.scalers
scalers.GetEntry(0)
for x in scalers.GetListOfBranches():
name = x.GetName()
s = eval('scalers.'+name)
if name!='slices':
if not scalerStat.has_key(name):scalerStat[name]=0
scalerStat[name]+=s
keys = scalerStat.keys()
keys.sort()
for k in keys: print k,':',scalerStat[k]
def makeDTEfficiency(merge=False):
cmd = "hadd -f DTEff.root "
for fname in os.listdir('.'):
if not merge and fname.find('SPILL')==0:
cmd = "python "+pathToMacro+"drifttubeMonitoring.py -c DTeffWithRPCTracks -f "+fname+' &'
os.system(cmd)
time.sleep(10)
while 1>0:
if count_python_processes('drifttubeMonitoring')<ncpus: break
time.sleep(10)
elif merge and fname.find('histos-DTEff')==0:
cmd+=fname+' '
if merge: os.system(cmd)
print "finished all the tasks."
def importMomDistr(keyword = 'RUN_8000_2'):
pathHistos = '/media/truf/disk2/home/truf/ShipSoft/ship-ubuntu-1710-64/'
temp = os.listdir(pathHistos)
for x in temp:
if x.find(keyword)<0: continue
run = x
if not run in os.listdir('.'):
os.system('mkdir '+run)
os.system('cp '+pathHistos+run+'/momDistributions.root '+run)
| [
"[email protected]"
] | |
b0e2af4e4d675713ffc95e2005e39ebb9196bccb | 2b1448085c5ad44e78772dde1dcc2fae9cc4c3cc | /botorch/models/converter.py | 35da4a3d8d21b48c62c2098e7a129b871f4e43c0 | [
"MIT"
] | permissive | leelasd/botorch | 47fa0ff9c5f6c534ecfcba59f5b1bf52eea0d62e | c48bfc822940ee8a6e5e2604d4ff282033dbe892 | refs/heads/master | 2022-12-17T04:42:41.591444 | 2020-09-10T23:45:05 | 2020-09-10T23:46:41 | 294,561,185 | 1 | 0 | MIT | 2020-09-11T01:19:36 | 2020-09-11T01:19:35 | null | UTF-8 | Python | false | false | 8,088 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Utilities for converting between different models.
"""
from __future__ import annotations
from copy import deepcopy
import torch
from botorch.exceptions import UnsupportedError
from botorch.models.gp_regression import FixedNoiseGP, HeteroskedasticSingleTaskGP
from botorch.models.gp_regression_fidelity import SingleTaskMultiFidelityGP
from botorch.models.gpytorch import BatchedMultiOutputGPyTorchModel
from botorch.models.model_list_gp_regression import ModelListGP
from torch.nn import Module
def _get_module(module: Module, name: str) -> Module:
"""Recursively get a sub-module from a module.
Args:
module: A `torch.nn.Module`.
name: The name of the submodule to return, in the form of a period-delinated
string: `sub_module.subsub_module.[...].leaf_module`.
Returns:
The requested sub-module.
Example:
>>> gp = SingleTaskGP(train_X, train_Y)
>>> noise_prior = _get_module(gp, "likelihood.noise_covar.noise_prior")
"""
current = module
if name != "":
for a in name.split("."):
current = getattr(current, a)
return current
def _check_compatibility(models: ModelListGP) -> None:
"""Check if a ModelListGP can be converted."""
# check that all submodules are of the same type
for modn, mod in models[0].named_modules():
mcls = mod.__class__
if not all(isinstance(_get_module(m, modn), mcls) for m in models[1:]):
raise UnsupportedError(
"Sub-modules must be of the same type across models."
)
# check that each model is a BatchedMultiOutputGPyTorchModel
if not all(isinstance(m, BatchedMultiOutputGPyTorchModel) for m in models):
raise UnsupportedError(
"All models must be of type BatchedMultiOutputGPyTorchModel."
)
# TODO: Add support for HeteroskedasticSingleTaskGP
if any(isinstance(m, HeteroskedasticSingleTaskGP) for m in models):
raise NotImplementedError(
"Conversion of HeteroskedasticSingleTaskGP is currently unsupported."
)
# TODO: Add support for custom likelihoods
if any(getattr(m, "_is_custom_likelihood", False) for m in models):
raise NotImplementedError(
"Conversion of models with custom likelihoods is currently unsupported."
)
# check that each model is single-output
if not all(m._num_outputs == 1 for m in models):
raise UnsupportedError("All models must be single-output.")
# check that training inputs are the same
if not all(
torch.equal(ti, tj)
for m in models[1:]
for ti, tj in zip(models[0].train_inputs, m.train_inputs)
):
raise UnsupportedError("training inputs must agree for all sub-models.")
def model_list_to_batched(model_list: ModelListGP) -> BatchedMultiOutputGPyTorchModel:
"""Convert a ModelListGP to a BatchedMultiOutputGPyTorchModel.
Args:
model_list: The `ModelListGP` to be converted to the appropriate
`BatchedMultiOutputGPyTorchModel`. All sub-models must be of the same
            type and have the same shape (batch shape and number of training inputs).
Returns:
The model converted into a `BatchedMultiOutputGPyTorchModel`.
Example:
>>> list_gp = ModelListGP(gp1, gp2)
>>> batch_gp = model_list_to_batched(list_gp)
"""
models = model_list.models
_check_compatibility(models)
# if the list has only one model, we can just return a copy of that
if len(models) == 1:
return deepcopy(models[0])
# construct inputs
train_X = deepcopy(models[0].train_inputs[0])
train_Y = torch.stack([m.train_targets.clone() for m in models], dim=-1)
kwargs = {"train_X": train_X, "train_Y": train_Y}
if isinstance(models[0], FixedNoiseGP):
kwargs["train_Yvar"] = torch.stack(
[m.likelihood.noise_covar.noise.clone() for m in models], dim=-1
)
if isinstance(models[0], SingleTaskMultiFidelityGP):
init_args = models[0]._init_args
if not all(
v == m._init_args[k] for m in models[1:] for k, v in init_args.items()
):
raise UnsupportedError("All models must have the same fidelity parameters.")
kwargs.update(init_args)
# construct the batched GP model
batch_gp = models[0].__class__(**kwargs)
tensors = {n for n, p in batch_gp.state_dict().items() if len(p.shape) > 0}
scalars = set(batch_gp.state_dict()) - tensors
input_batch_dims = len(models[0]._input_batch_shape)
# ensure scalars agree (TODO: Allow different priors for different outputs)
for n in scalars:
v0 = _get_module(models[0], n)
if not all(torch.equal(_get_module(m, n), v0) for m in models[1:]):
raise UnsupportedError("All scalars must have the same value.")
# ensure dimensions of all tensors agree
for n in tensors:
shape0 = _get_module(models[0], n).shape
if not all(_get_module(m, n).shape == shape0 for m in models[1:]):
raise UnsupportedError("All tensors must have the same shape.")
# now construct the batched state dict
scalar_state_dict = {
s: p.clone() for s, p in models[0].state_dict().items() if s in scalars
}
tensor_state_dict = {
t: (
torch.stack(
[m.state_dict()[t].clone() for m in models], dim=input_batch_dims
)
if "active_dims" not in t
else models[0].state_dict()[t].clone()
)
for t in tensors
}
batch_state_dict = {**scalar_state_dict, **tensor_state_dict}
# load the state dict into the new model
batch_gp.load_state_dict(batch_state_dict)
return batch_gp
def batched_to_model_list(batch_model: BatchedMultiOutputGPyTorchModel) -> ModelListGP:
"""Convert a BatchedMultiOutputGPyTorchModel to a ModelListGP.
Args:
        batch_model: The `BatchedMultiOutputGPyTorchModel` to be converted to a
`ModelListGP`.
Returns:
The model converted into a `ModelListGP`.
Example:
>>> train_X = torch.rand(5, 2)
>>> train_Y = torch.rand(5, 2)
>>> batch_gp = SingleTaskGP(train_X, train_Y)
>>> list_gp = batched_to_model_list(batch_gp)
"""
# TODO: Add support for HeteroskedasticSingleTaskGP
if isinstance(batch_model, HeteroskedasticSingleTaskGP):
raise NotImplementedError(
"Conversion of HeteroskedasticSingleTaskGP currently not supported."
)
batch_sd = batch_model.state_dict()
tensors = {n for n, p in batch_sd.items() if len(p.shape) > 0}
scalars = set(batch_sd) - tensors
input_bdims = len(batch_model._input_batch_shape)
models = []
for i in range(batch_model._num_outputs):
scalar_sd = {s: batch_sd[s].clone() for s in scalars}
tensor_sd = {
t: (
batch_sd[t].select(input_bdims, i).clone()
if "active_dims" not in t
else batch_sd[t].clone()
)
for t in tensors
}
sd = {**scalar_sd, **tensor_sd}
kwargs = {
"train_X": batch_model.train_inputs[0].select(input_bdims, i).clone(),
"train_Y": batch_model.train_targets.select(input_bdims, i)
.clone()
.unsqueeze(-1),
}
if isinstance(batch_model, FixedNoiseGP):
noise_covar = batch_model.likelihood.noise_covar
kwargs["train_Yvar"] = (
noise_covar.noise.select(input_bdims, i).clone().unsqueeze(-1)
)
if isinstance(batch_model, SingleTaskMultiFidelityGP):
kwargs.update(batch_model._init_args)
model = batch_model.__class__(**kwargs)
model.load_state_dict(sd)
models.append(model)
return ModelListGP(*models)
| [
"[email protected]"
] | |
7fa2949cf1cd3bc986e1801d3d60ef78650ba85e | 8186514b510a801863229e3f9711c0c657e727e5 | /assembly/0427/explore_qt/22/2.py | 1995344ca3c4e4d6ee83bf7b963ca016295d0b6c | [] | no_license | masknugget/mypyqt | 274b2cbbf66c04927453815248f9c1bc5e65ca17 | b86a49e4b8c7c8c3d8546ce1b49f8f3bb6332307 | refs/heads/main | 2023-08-17T13:30:11.451066 | 2021-09-27T14:14:54 | 2021-09-27T14:14:54 | 355,904,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,037 | py | import sys
from PyQt5.QtWidgets import QApplication, QWidget, QInputDialog, QLineEdit, QTextEdit, QPushButton, \
QGridLayout
class Demo(QWidget):
def __init__(self):
super(Demo, self).__init__()
self.name_btn = QPushButton('Name', self)
self.gender_btn = QPushButton('Gender', self)
self.age_btn = QPushButton('Age', self)
self.score_btn = QPushButton('Score', self)
self.info_btn = QPushButton('Info', self)
self.name_btn.clicked.connect(lambda: self.open_dialog_func(self.name_btn))
self.gender_btn.clicked.connect(lambda: self.open_dialog_func(self.gender_btn))
self.age_btn.clicked.connect(lambda: self.open_dialog_func(self.age_btn))
self.score_btn.clicked.connect(lambda: self.open_dialog_func(self.score_btn))
self.info_btn.clicked.connect(lambda: self.open_dialog_func(self.info_btn))
self.name_line = QLineEdit(self)
self.gender_line = QLineEdit(self)
self.age_line = QLineEdit(self)
self.score_line = QLineEdit(self)
self.info_textedit = QTextEdit(self)
self.g_layout = QGridLayout()
self.g_layout.addWidget(self.name_btn, 0, 0, 1, 1)
self.g_layout.addWidget(self.name_line, 0, 1, 1, 1)
self.g_layout.addWidget(self.gender_btn, 1, 0, 1, 1)
        self.g_layout.addWidget(self.gender_line, 1, 1, 1, 1)
self.g_layout.addWidget(self.age_btn, 2, 0, 1, 1)
self.g_layout.addWidget(self.age_line, 2, 1, 1, 1)
self.g_layout.addWidget(self.score_btn, 3, 0, 1, 1)
self.g_layout.addWidget(self.score_line, 3, 1, 1, 1)
self.g_layout.addWidget(self.info_btn, 4, 0, 1, 1)
self.g_layout.addWidget(self.info_textedit, 4, 1, 1, 1)
self.setLayout(self.g_layout)
def open_dialog_func(self, btn):
if btn == self.name_btn: # 1
name, ok = QInputDialog.getText(self, 'Name Input', 'Please enter the name:')
if ok:
self.name_line.setText(name)
elif btn == self.gender_btn: # 2
gender_list = ['Female', 'Male']
gender, ok = QInputDialog.getItem(self, 'Gender Input', 'Please choose the gender:', gender_list, 0, False)
if ok:
self.gender_line.setText(gender)
elif btn == self.age_btn:
age, ok = QInputDialog.getInt(self, 'Age Input', 'Please select the age:')
if ok:
self.age_line.setText(str(age))
elif btn == self.score_btn:
score, ok = QInputDialog.getDouble(self, 'Score Input', 'Please select the score:')
if ok:
self.score_line.setText(str(score))
else:
info, ok = QInputDialog.getMultiLineText(self, 'Info Input', 'Please enter the info:')
if ok:
self.info_textedit.setText(info)
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
963c21dcb3fda320cc53ce7e08d427b37c2d8aea | 6a2b1b1d6092a8d2492a6677b6fd19d27b0f461f | /08-Python-DataTypes/Tuples/02-create-one-element-tuple.py | 05fa335a53400e9ea8a0525d7b35a9f3a2482310 | [] | no_license | Uttam1982/PythonTutorial | 3cfbe237199e048967502f3d0c1936f2b878cb87 | 8e28cc5c4be5826a011059db66f6952871248c82 | refs/heads/master | 2022-12-17T18:47:28.397383 | 2020-09-22T08:55:23 | 2020-09-22T08:55:23 | 288,524,784 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 535 | py | # Creating a tuple with one element is a bit tricky.
# 1. Having one element within parentheses is not enough.
# 2. We will need a trailing comma to indicate that it is, in fact, a tuple.
my_tuple = ("python")
print("tuple without trailing comma: ",type(my_tuple)) # <class 'str'>
#Creating a tuple having one element
my_tuple = ("python",)
print("tuple with trailing comma: ",type(my_tuple)) # <class 'tuple'>
## Parentheses is optional
my_tuple = "python",
print("Parentheses is optional: ",type(my_tuple)) # <class 'tuple'>
| [
"[email protected]"
] | |
3667c8f1f8d45c41f552e9abe2d97e7838ac9395 | ef187d259d33e97c7b9ed07dfbf065cec3e41f59 | /work/atcoder/abc/abc083/B/answers/896331_rin1120.py | b9ceb3808d88e51ac154a5487becbe592cfa4936 | [] | no_license | kjnh10/pcw | 847f7295ea3174490485ffe14ce4cdea0931c032 | 8f677701bce15517fb9362cc5b596644da62dca8 | refs/heads/master | 2020-03-18T09:54:23.442772 | 2018-07-19T00:26:09 | 2018-07-19T00:26:09 | 134,586,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | N, A, B = map(int, input().split())
ans=0
for i in range(N):
t = 0
j = i+1
while j != 0:
t += j%10
j //= 10
if A <= t and t <= B:
ans += i+1
print(ans) | [
"[email protected]"
] | |
89cd4ca057d69b4c1e05d0a821256293352b855f | 4a8c1f7d9935609b780aff95c886ef7781967be0 | /atcoder/_codeforces/1467_c.py | a74cb1c4230efe5766c5cfc9695586b0a0b3e910 | [] | no_license | recuraki/PythonJunkTest | d5e5f5957ac5dd0c539ef47759b1fe5ef7a2c52a | 2556c973d468a6988d307ce85c5f2f8ab15e759a | refs/heads/master | 2023-08-09T17:42:21.875768 | 2023-07-18T23:06:31 | 2023-07-18T23:06:31 | 13,790,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,557 | py | import sys
from io import StringIO
import unittest
import logging
logging.basicConfig(level=logging.DEBUG)
def resolve():
from pprint import pprint
import sys
input = sys.stdin.readline
def do():
n1, n2, n3 = map(int, input().split())
dat1 = list(map(int, input().split()))
dat2 = list(map(int, input().split()))
dat3 = list(map(int, input().split()))
q = int(input())
for _ in range(q):
do()
# do()
class TestClass(unittest.TestCase):
def assertIO(self, input, output):
stdout, stdin = sys.stdout, sys.stdin
sys.stdout, sys.stdin = StringIO(), StringIO(input)
resolve()
sys.stdout.seek(0)
out = sys.stdout.read()[:-1]
sys.stdout, sys.stdin = stdout, stdin
self.assertEqual(out, output)
def test_input_1(self):
print("test_input_1")
input = """2 4 1
1 2
6 3 4 5
5"""
output = """20"""
self.assertIO(input, output)
def test_input_2(self):
print("test_input_2")
input = """3 2 2
7 5 4
2 9
7 1"""
output = """29"""
self.assertIO(input, output)
def test_input_3(self):
print("test_input_3")
input = """xxx"""
output = """xxx"""
self.assertIO(input, output)
def test_input_4(self):
print("test_input_4")
input = """xxx"""
output = """xxx"""
self.assertIO(input, output)
if __name__ == "__main__":
unittest.main() | [
"[email protected]"
] | |
adbeff76935cbd7b2290404a3caf4ecbd26075b6 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/nouns/_motion.py | 19c9b83026d27cb66cfde07dd09ad7733cf6dde8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 527 | py |
#class header
class _MOTION():
def __init__(self,):
self.name = "MOTION"
self.definitions = [u'the act or process of moving, or a particular action or movement: ', u'a polite way of referring to the process of getting rid of solid waste from the body, or the waste itself: ', u'a formal suggestion made, discussed, and voted on at a meeting: ']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'nouns'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
26d639c4fbab5876c769b3ea6ae7da455fd84403 | 1f7847055332e16614f5358f0ec39b39bb9a66a7 | /exercises/12_oop_inheritance/test_task_12_4.py | ec440e80a177b6ac47dabd01f370487663a50659 | [] | no_license | satperm/advpyneng-examples-exercises | 6641dae31fa7f44db7e99547bc70d740988f21b9 | 6b12c320cace1d303dae38ddba9b19550a8708ec | refs/heads/master | 2022-12-14T09:28:48.255804 | 2020-09-06T14:14:42 | 2020-09-06T14:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,911 | py | import pytest
import task_12_4
import sys
sys.path.append('..')
from common_functions import check_class_exists, check_attr_or_method
def test_class_created():
check_class_exists(task_12_4, 'OrderingMixin')
def test_special_methods_created():
class IntTest(task_12_4.OrderingMixin):
def __init__(self, number):
self._number = number
def __eq__(self, other):
return self._number == other._number
def __lt__(self, other):
return self._number < other._number
int1 = IntTest(5)
check_attr_or_method(int1, method='__ge__')
check_attr_or_method(int1, method='__ne__')
check_attr_or_method(int1, method='__le__')
check_attr_or_method(int1, method='__gt__')
def test_methods():
class IntTest(task_12_4.OrderingMixin):
def __init__(self, number):
self._number = number
def __eq__(self, other):
return self._number == other._number
def __lt__(self, other):
return self._number < other._number
int1 = IntTest(5)
int2 = IntTest(3)
assert int1 != int2
assert int1 >= int2
assert int1 > int2
assert not int1 < int2
def test_methods_2():
class DoThing(task_12_4.OrderingMixin):
def __init__(self, num):
self.num = num
def __eq__(self, other):
return self.num == other.num
def __lt__(self, other):
return self.num < other.num
small_num = DoThing(1)
big_num = DoThing(100)
assert small_num < big_num
assert small_num <= big_num
assert not small_num > big_num
assert not small_num >= big_num
assert small_num != big_num
small_num = DoThing(1)
big_num = DoThing(100)
assert not big_num < small_num
assert not big_num <= small_num
assert big_num > small_num
assert big_num >= small_num
assert big_num != small_num
| [
"[email protected]"
] | |
7ecbe0b308cb8371f7ee5198762f1a81ddafae19 | fca80c6a22bcce507a81e05cd31e0d5ebbc43a57 | /Chapter_05/samples/guestPicnic.py | cfe00f043c0535219fe766ef773df1d474944cd1 | [
"MIT"
] | permissive | GSantos23/Automate_Python | 6b1ce29f1ee5a22b53ef6c1d45fef56d8d8e0b06 | 4bf3eadb5a330d5f22329bdcd08d37ab01a9454f | refs/heads/master | 2021-06-29T04:12:32.910835 | 2020-12-26T22:28:31 | 2020-12-26T22:28:31 | 197,512,449 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | allGuests = {'Alice': {'apples': 5, 'pretzels': 12},
'Bob': {'ham sandwiches': 3, 'apples': 2},
'Carol': {'cups': 3, 'apple pies': 1}}
def totalBrought(guests, item):
numBrought = 0
for k, v in guests.items():
numBrought = numBrought + v.get(item, 0)
return numBrought
print('Number of things being brought:')
print(' - Apples ' + str(totalBrought(allGuests, 'apples')))
print(' - Cups ' + str(totalBrought(allGuests, 'cups')))
print(' - Cakes ' + str(totalBrought(allGuests, 'cakes')))
print(' - Ham sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
print(' - Apple Pies ' + str(totalBrought(allGuests, 'apple pies')))
| [
"[email protected]"
] | |
6343752dd269bc7d88d0650fcc42fd02bb191453 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/1760.py | 751381d98be1f23c884db62b177e89e978aaa771 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 512 | py | ii = [('CookGHP3.py', 1), ('CoolWHM2.py', 1), ('SadlMLP.py', 5), ('ShawHDE.py', 1), ('UnitAI.py', 1), ('PeckJNG.py', 1), ('AubePRP.py', 13), ('AdamWEP.py', 1), ('FitzRNS3.py', 1), ('ClarGE2.py', 53), ('CookGHP2.py', 4), ('CrokTPS.py', 1), ('ClarGE.py', 16), ('LyelCPG.py', 13), ('WestJIT2.py', 3), ('WadeJEB.py', 12), ('SoutRD2.py', 1), ('MereHHB3.py', 4), ('HogaGMM.py', 3), ('BabbCEM.py', 1), ('SomeMMH.py', 8), ('ClarGE3.py', 30), ('DibdTRL.py', 1), ('HogaGMM2.py', 10), ('EvarJSP.py', 1), ('SadlMLP2.py', 4)] | [
"[email protected]"
] | |
c694b3c710fa1d7d763f6b5b2c107b194a665936 | 564d6a4d305a8ac6a7e01c761831fb2081c02d0f | /sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_route_tables_operations.py | adc44f5b31718ed15851b48a3e9e38cdbd048214 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | paultaiton/azure-sdk-for-python | 69af4d889bac8012b38f5b7e8108707be679b472 | d435a1a25fd6097454b7fdfbbdefd53e05029160 | refs/heads/master | 2023-01-30T16:15:10.647335 | 2020-11-14T01:09:50 | 2020-11-14T01:09:50 | 283,343,691 | 0 | 0 | MIT | 2020-07-28T22:43:43 | 2020-07-28T22:43:43 | null | UTF-8 | Python | false | false | 25,722 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class RouteTablesOperations:
"""RouteTablesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_11_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
route_table_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def get(
self,
resource_group_name: str,
route_table_name: str,
expand: Optional[str] = None,
**kwargs
) -> "models.RouteTable":
"""Gets the specified route table.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> "models.RouteTable":
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'RouteTable')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('RouteTable', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.RouteTable",
**kwargs
) -> AsyncLROPoller["models.RouteTable"]:
"""Create or updates a route table in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to the create or update route table operation.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.RouteTable
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either RouteTable or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_11_01.models.RouteTable]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
route_table_name=route_table_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
route_table_name: str,
parameters: "models.TagsObject",
**kwargs
) -> "models.RouteTable":
"""Updates a route table tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param route_table_name: The name of the route table.
:type route_table_name: str
:param parameters: Parameters supplied to update route table tags.
:type parameters: ~azure.mgmt.network.v2019_11_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RouteTable, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_11_01.models.RouteTable
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTable"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'routeTableName': self._serialize.url("route_table_name", route_table_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RouteTable', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables/{routeTableName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/routeTables'} # type: ignore
def list_all(
self,
**kwargs
) -> AsyncIterable["models.RouteTableListResult"]:
"""Gets all route tables in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either RouteTableListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.RouteTableListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.RouteTableListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-11-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('RouteTableListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/routeTables'} # type: ignore
| [
"[email protected]"
] | |
2b43980f401fc20884576fe5b39260203c3a7de9 | ab79f8297105a7d412303a8b33eaa25038f38c0b | /imersia/vit_product/stock.py | d8ccbb76ecfe1daf893e0694292440f3b1ff45a0 | [] | no_license | adahra/addons | 41a23cbea1e35079f7a9864ade3c32851ee2fb09 | c5a5678379649ccdf57a9d55b09b30436428b430 | refs/heads/master | 2022-06-17T21:22:22.306787 | 2020-05-15T10:51:14 | 2020-05-15T10:51:14 | 264,167,002 | 1 | 0 | null | 2020-05-15T10:39:26 | 2020-05-15T10:39:26 | null | UTF-8 | Python | false | false | 5,172 | py | import time
from openerp.osv import fields
from openerp.osv import osv
from openerp.osv import orm
from openerp.tools.translate import _
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT, float_compare
class StockMove(osv.osv):
_inherit = 'stock.move'
def _src_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'location_production')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
def _dest_id_default(self, cr, uid, ids, context=None):
try:
location_model, location_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'stock', 'stock_location_stock')
self.pool.get('stock.location').check_access_rule(cr, uid, [location_id], 'read', context=context)
except (orm.except_orm, ValueError):
location_id = False
return location_id
_columns = {
'custom_production_id': fields.many2one('mrp.production.custom', 'Production Order for Produced Products', select=True, copy=False),
'waste_qty':fields.float('Waste (%)'),
# 'raw_material_production_id': fields.many2one('mrp.production', 'Production Order for Raw Materials', select=True),
'consumed_for': fields.many2one('stock.move', 'Consumed for', help='Technical field used to make the traceability of produced products'),
}
def action_consume_custom(self, cr, uid, ids, product_qty, location_id=False, restrict_lot_id=False, restrict_partner_id=False,
consumed_for=False, context=None):
""" Consumed product with specific quantity from specific source location.
@param product_qty: Consumed/produced product quantity (= in quantity of UoM of product)
@param location_id: Source location
        @param restrict_lot_id: optional parameter that allows to restrict the choice of quants on this specific lot
        @param restrict_partner_id: optional parameter that allows to restrict the choice of quants to this specific partner
        @param consumed_for: optional parameter given to this function to make the link between raw material consumed and produced product, for a better traceability
@return: New lines created if not everything was consumed for this line
"""
if context is None:
context = {}
res = []
production_obj = self.pool.get('mrp.production.custom')
if product_qty <= 0:
raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))
#because of the action_confirm that can create extra moves in case of phantom bom, we need to make 2 loops
ids2 = []
for move in self.browse(cr, uid, ids, context=context):
if move.state == 'draft':
ids2.extend(self.action_confirm(cr, uid, [move.id], context=context))
else:
ids2.append(move.id)
prod_orders = set()
for move in self.browse(cr, uid, ids2, context=context):
prod_orders.add(move.custom_production_id.id)
print"Total Qty>>>",product_qty
move_qty = product_qty
if move_qty <= 0.00:
raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))
quantity_rest = move_qty - product_qty
print"Rest Qty>>>",quantity_rest
# Compare with numbers of move uom as we want to avoid a split with 0 qty
quantity_rest_uom = move.product_uom_qty - self.pool.get("product.uom")._compute_qty_obj(cr, uid, move.product_id.uom_id, product_qty, move.product_uom)
if float_compare(quantity_rest_uom, 0, precision_rounding=move.product_uom.rounding) != 0:
new_mov = self.split(cr, uid, move, quantity_rest, context=context)
print"New Move>>>",new_mov
res.append(new_mov)
vals = {'restrict_lot_id': restrict_lot_id,
'restrict_partner_id': restrict_partner_id,
'consumed_for': consumed_for}
if location_id:
vals.update({'location_id': location_id})
self.write(cr, uid, [move.id], vals, context=context)
# Original moves will be the quantities consumed, so they need to be done
self.action_done(cr, uid, ids2, context=context)
if res:
self.action_assign(cr, uid, res, context=context)
if prod_orders:
production_obj.action_in_production(cr, uid, list(prod_orders), context=None)
#production_obj.signal_workflow(cr, uid, list(prod_orders), 'button_produce')
return res
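    # Illustrative call only (move_id, qty, src_location_id and finished_move_id are
    # placeholders, not fields defined in this module):
    #   move_obj = self.pool.get('stock.move')
    #   move_obj.action_consume_custom(cr, uid, [move_id], qty,
    #                                  location_id=src_location_id,
    #                                  consumed_for=finished_move_id)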
_defaults = {
'location_id': _src_id_default,
'location_dest_id': _dest_id_default
} | [
"prog1@381544ba-743e-41a5-bf0d-221725b9d5af"
] | prog1@381544ba-743e-41a5-bf0d-221725b9d5af |
b6f6432a451ac396f4378d34ae642e68e475e1e3 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/machinelearningservices/v20210101/get_aks_service.py | d436cd9c6fca64cd385942d716820e2980e1cc9c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 5,922 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetAKSServiceResult',
'AwaitableGetAKSServiceResult',
'get_aks_service',
]
@pulumi.output_type
class GetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
"""
def __init__(__self__, id=None, identity=None, location=None, name=None, properties=None, sku=None, system_data=None, tags=None, type=None):
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if system_data and not isinstance(system_data, dict):
raise TypeError("Expected argument 'system_data' to be a dict")
pulumi.set(__self__, "system_data", system_data)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.IdentityResponse']:
"""
The identity of the resource.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
Service properties
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
The sku of the workspace.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> 'outputs.SystemDataResponse':
"""
Read only system data
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetAKSServiceResult(GetAKSServiceResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAKSServiceResult(
id=self.id,
identity=self.identity,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
system_data=self.system_data,
tags=self.tags,
type=self.type)
def get_aks_service(expand: Optional[bool] = None,
resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
workspace_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAKSServiceResult:
"""
Machine Learning service object wrapped into ARM resource envelope.
:param bool expand: Set to True to include Model details.
:param str resource_group_name: Name of the resource group in which workspace is located.
:param str service_name: Name of the Azure Machine Learning service.
:param str workspace_name: Name of Azure Machine Learning workspace.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['workspaceName'] = workspace_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:machinelearningservices/v20210101:getAKSService', __args__, opts=opts, typ=GetAKSServiceResult).value
return AwaitableGetAKSServiceResult(
id=__ret__.id,
identity=__ret__.identity,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
system_data=__ret__.system_data,
tags=__ret__.tags,
type=__ret__.type)
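
# Illustrative usage sketch (not part of the generated SDK); the resource group,
# workspace and service names below are placeholder assumptions:
#
#     import pulumi
#     from pulumi_azure_native.machinelearningservices.v20210101 import get_aks_service
#
#     svc = get_aks_service(resource_group_name="my-rg",
#                           workspace_name="my-ml-workspace",
#                           service_name="my-aks-service",
#                           expand=True)
#     pulumi.export("aksServiceProperties", svc.properties)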
| [
"[email protected]"
] | |
cd9bb1eb10be89931f7564472027e88621ad041e | 8143bfdbda6fdbef40bc570f48773edd365fcb62 | /project/Kyb-TestProject/businessView/loginView.py | 895c338f89df61341cca470210d9b35b905c1f74 | [] | no_license | CaptainJi/Kyb-TestProject | 199caef0f1e58d6bb45273114596daf6ebdc424c | 38d200d4d8436d4ad699682c3606f035446093cc | refs/heads/master | 2022-10-16T15:36:20.499879 | 2020-06-06T07:06:22 | 2020-06-06T07:06:22 | 259,554,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,798 | py | import logging
from common.commonFun import Common, NoSuchElementException
from common.desiredCaps import appium_desired
from selenium.webdriver.common.by import By
# Class encapsulating the login business logic
class LoginView(Common):
    # Locators for the username and password input fields
username_type = (By.ID, 'com.tal.kaoyan:id/login_email_edittext')
password_type = (By.ID, 'com.tal.kaoyan:id/login_password_edittext')
    # Locator for the login button
loginBtn = (By.ID, 'com.tal.kaoyan:id/login_login_btn')
tip_commit = (By.ID, 'com.tal.kaoyan:id/tip_commit')
    # Locator for the "My profile" button
button_mysefl = (By.ID, 'com.tal.kaoyan:id/mainactivity_button_mysefl')
usercenter_username = (By.ID, 'com.tal.kaoyan:id/activity_usercenter_username')
right_button = (By.ID, 'com.tal.kaoyan:id/myapptitle_RightButton_textview')
    # Locator for the logout entry
logout = (By.ID, 'com.tal.kaoyan:id/setting_logout_text')
def login_action(self, username, password):
        # Dismiss the upgrade prompt
        self.check_cancel_btn()
        # Skip the intro guide
        self.check_skipBtn()
        logging.info('Starting login')
        logging.info('Username: %s' % username)
        self.driver.find_element(*self.username_type).send_keys(username)
        logging.info('Password: %s' % password)
        self.driver.find_element(*self.password_type).send_keys(password)
        logging.info('Clicking the login button')
        self.driver.find_element(*self.loginBtn).click()
def check_account_alert(self):
        logging.info('Checking for a login warning dialog')
try:
element = self.driver.find_element(*self.tip_commit)
except NoSuchElementException:
pass
else:
            logging.info('Dismissing the login warning dialog')
element.click()
def check_login_status(self):
        logging.info('Checking login status')
self.check_market_ad()
self.check_account_alert()
try:
self.driver.find_element(*self.button_mysefl).click()
self.driver.find_element(*self.usercenter_username)
except NoSuchElementException:
            logging.error('Login failed')
            self.getScreenShot('Login failed')
return False
else:
            logging.info('Login succeeded')
            self.getScreenShot('Login succeeded')
self.logout_action()
return True
def logout_action(self):
        logging.info('Logging out')
self.driver.find_element(*self.right_button).click()
self.driver.find_element(*self.logout).click()
self.driver.find_element(*self.tip_commit).click()
if __name__ == '__main__':
driver = appium_desired()
l = LoginView(driver)
l.check_cancel_btn()
l.check_skipBtn()
l.login_action('', '')
l.check_login_status()
| [
"[email protected]"
] | |
b65f96bee6c891e742a26f9d3d76f59dec94b3e2 | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/cdn/v20200331/_enums.py | 656075cbb5232d664bcd6a85457bace0d55ba8c4 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 10,641 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ActionType',
'CacheBehavior',
'CacheType',
'CookiesOperator',
'CustomRuleEnabledState',
'DeliveryRuleAction',
'DestinationProtocol',
'GeoFilterActions',
'HeaderAction',
'HealthProbeRequestType',
'HttpVersionOperator',
'IsDeviceOperator',
'ManagedRuleEnabledState',
'MatchVariable',
'Operator',
'OptimizationType',
'PolicyEnabledState',
'PolicyMode',
'PostArgsOperator',
'ProbeProtocol',
'QueryStringBehavior',
'QueryStringCachingBehavior',
'QueryStringOperator',
'RedirectType',
'RemoteAddressOperator',
'RequestBodyOperator',
'RequestHeaderOperator',
'RequestMethodOperator',
'RequestUriOperator',
'ResponseBasedDetectedErrorTypes',
'SkuName',
'Transform',
'TransformType',
'UrlFileExtensionOperator',
'UrlFileNameOperator',
'UrlPathOperator',
]
class ActionType(str, Enum):
"""
Describes what action to be applied when rule matches
"""
ALLOW = "Allow"
BLOCK = "Block"
LOG = "Log"
REDIRECT = "Redirect"
class CacheBehavior(str, Enum):
"""
Caching behavior for the requests
"""
BYPASS_CACHE = "BypassCache"
OVERRIDE = "Override"
SET_IF_MISSING = "SetIfMissing"
class CacheType(str, Enum):
"""
The level at which the content needs to be cached.
"""
ALL = "All"
class CookiesOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class CustomRuleEnabledState(str, Enum):
"""
Describes if the custom rule is in enabled or disabled state. Defaults to Enabled if not specified.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class DeliveryRuleAction(str, Enum):
"""
The name of the action for the delivery rule.
"""
CACHE_EXPIRATION = "CacheExpiration"
CACHE_KEY_QUERY_STRING = "CacheKeyQueryString"
MODIFY_REQUEST_HEADER = "ModifyRequestHeader"
MODIFY_RESPONSE_HEADER = "ModifyResponseHeader"
URL_REDIRECT = "UrlRedirect"
URL_REWRITE = "UrlRewrite"
URL_SIGNING = "UrlSigning"
class DestinationProtocol(str, Enum):
"""
Protocol to use for the redirect. The default value is MatchRequest
"""
MATCH_REQUEST = "MatchRequest"
HTTP = "Http"
HTTPS = "Https"
class GeoFilterActions(str, Enum):
"""
Action of the geo filter, i.e. allow or block access.
"""
BLOCK = "Block"
ALLOW = "Allow"
class HeaderAction(str, Enum):
"""
Action to perform
"""
APPEND = "Append"
OVERWRITE = "Overwrite"
DELETE = "Delete"
class HealthProbeRequestType(str, Enum):
"""
The type of health probe request that is made.
"""
NOT_SET = "NotSet"
GET = "GET"
HEAD = "HEAD"
class HttpVersionOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class IsDeviceOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class ManagedRuleEnabledState(str, Enum):
"""
Describes if the managed rule is in enabled or disabled state. Defaults to Disabled if not specified.
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class MatchVariable(str, Enum):
"""
Match variable to compare against.
"""
REMOTE_ADDR = "RemoteAddr"
SOCKET_ADDR = "SocketAddr"
REQUEST_METHOD = "RequestMethod"
REQUEST_HEADER = "RequestHeader"
REQUEST_URI = "RequestUri"
QUERY_STRING = "QueryString"
REQUEST_BODY = "RequestBody"
COOKIES = "Cookies"
POST_ARGS = "PostArgs"
class Operator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
EQUAL = "Equal"
CONTAINS = "Contains"
LESS_THAN = "LessThan"
GREATER_THAN = "GreaterThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
REG_EX = "RegEx"
class OptimizationType(str, Enum):
"""
Specifies what scenario the customer wants this CDN endpoint to optimize for, e.g. Download, Media services. With this information, CDN can apply scenario driven optimization.
"""
GENERAL_WEB_DELIVERY = "GeneralWebDelivery"
GENERAL_MEDIA_STREAMING = "GeneralMediaStreaming"
VIDEO_ON_DEMAND_MEDIA_STREAMING = "VideoOnDemandMediaStreaming"
LARGE_FILE_DOWNLOAD = "LargeFileDownload"
DYNAMIC_SITE_ACCELERATION = "DynamicSiteAcceleration"
class PolicyEnabledState(str, Enum):
"""
describes if the policy is in enabled state or disabled state
"""
DISABLED = "Disabled"
ENABLED = "Enabled"
class PolicyMode(str, Enum):
"""
Describes if it is in detection mode or prevention mode at policy level.
"""
PREVENTION = "Prevention"
DETECTION = "Detection"
class PostArgsOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class ProbeProtocol(str, Enum):
"""
Protocol to use for health probe.
"""
NOT_SET = "NotSet"
HTTP = "Http"
HTTPS = "Https"
class QueryStringBehavior(str, Enum):
"""
Caching behavior for the requests
"""
INCLUDE = "Include"
INCLUDE_ALL = "IncludeAll"
EXCLUDE = "Exclude"
EXCLUDE_ALL = "ExcludeAll"
class QueryStringCachingBehavior(str, Enum):
"""
Defines how CDN caches requests that include query strings. You can ignore any query strings when caching, bypass caching to prevent requests that contain query strings from being cached, or cache every request with a unique URL.
"""
IGNORE_QUERY_STRING = "IgnoreQueryString"
BYPASS_CACHING = "BypassCaching"
USE_QUERY_STRING = "UseQueryString"
NOT_SET = "NotSet"
class QueryStringOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RedirectType(str, Enum):
"""
The redirect type the rule will use when redirecting traffic.
"""
MOVED = "Moved"
FOUND = "Found"
TEMPORARY_REDIRECT = "TemporaryRedirect"
PERMANENT_REDIRECT = "PermanentRedirect"
class RemoteAddressOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
IP_MATCH = "IPMatch"
GEO_MATCH = "GeoMatch"
class RequestBodyOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RequestHeaderOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class RequestMethodOperator(str, Enum):
"""
Describes operator to be matched
"""
EQUAL = "Equal"
class RequestUriOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class ResponseBasedDetectedErrorTypes(str, Enum):
"""
Type of response errors for real user requests for which origin will be deemed unhealthy
"""
NONE = "None"
TCP_ERRORS_ONLY = "TcpErrorsOnly"
TCP_AND_HTTP_ERRORS = "TcpAndHttpErrors"
class SkuName(str, Enum):
"""
Name of the pricing tier.
"""
STANDARD_VERIZON = "Standard_Verizon"
PREMIUM_VERIZON = "Premium_Verizon"
CUSTOM_VERIZON = "Custom_Verizon"
STANDARD_AKAMAI = "Standard_Akamai"
STANDARD_CHINA_CDN = "Standard_ChinaCdn"
STANDARD_MICROSOFT = "Standard_Microsoft"
PREMIUM_CHINA_CDN = "Premium_ChinaCdn"
class Transform(str, Enum):
"""
Describes what transforms are applied before matching
"""
LOWERCASE = "Lowercase"
UPPERCASE = "Uppercase"
class TransformType(str, Enum):
"""
Describes what transforms were applied before matching.
"""
LOWERCASE = "Lowercase"
UPPERCASE = "Uppercase"
TRIM = "Trim"
URL_DECODE = "UrlDecode"
URL_ENCODE = "UrlEncode"
REMOVE_NULLS = "RemoveNulls"
class UrlFileExtensionOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class UrlFileNameOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
class UrlPathOperator(str, Enum):
"""
Describes operator to be matched
"""
ANY = "Any"
EQUAL = "Equal"
CONTAINS = "Contains"
BEGINS_WITH = "BeginsWith"
ENDS_WITH = "EndsWith"
LESS_THAN = "LessThan"
LESS_THAN_OR_EQUAL = "LessThanOrEqual"
GREATER_THAN = "GreaterThan"
GREATER_THAN_OR_EQUAL = "GreaterThanOrEqual"
WILDCARD = "Wildcard"
| [
"[email protected]"
] | |
9deed2e10501ba1a8d6f3c0f052412d7cbb1bb3d | dd097c7ae744227b0312d762ee0482a3380ff8c6 | /plot_tg.py | 9f751bebfaa64b4b76be445e5325e06e65df06b0 | [] | no_license | moflaher/workspace_python | 0d6e98274d923a721db2b345f65c20b02ca59d08 | 6551e3602ead3373eafce10d11ce7b96bdcb106f | refs/heads/master | 2023-03-06T02:15:01.945481 | 2023-03-01T19:15:51 | 2023-03-01T19:15:51 | 20,814,932 | 3 | 4 | null | null | null | null | UTF-8 | Python | false | false | 7,079 | py | from __future__ import division,print_function
import matplotlib as mpl
import scipy as sp
from folderpath import *
from datatools import *
from gridtools import *
from plottools import *
from projtools import *
from stattools import *
import interptools as ipt
import matplotlib.tri as mplt
import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap
import os as os
import sys
np.set_printoptions(precision=8,suppress=True,threshold=sys.maxsize)
import pandas as pd
import netCDF4 as n4
import copy
import matplotlib.dates as dates
import argparse
try:
import ttide
tide=True
except:
print('No ttide')
tide=False
parser = argparse.ArgumentParser()
parser.add_argument("grid", help="name of the grid", type=str)
parser.add_argument("name", help="name of the run", type=str,default=None, nargs='?')
parser.add_argument("--station", help="switch to station output instead of fvcom output", default=False,action='store_true')
parser.add_argument("-dates", help="specify start and end date",type=str,nargs=2,default=None)
parser.add_argument("-snr", help="signal to noise ratio value used for constituent cutoff", type=float,default=2.0)
parser.add_argument("-skipdays", help="number of days to skip at start of timeseries", type=float,default=14.0)
args = parser.parse_args()
print("The current commandline arguments being used are")
print(args)
name=args.name
grid=args.grid
if args.station:
tag='station'
else:
tag='fvcom'
# find tg ncfiles
months = dates.MonthLocator()
monthsFmt = dates.DateFormatter('%b')
savepath='{}png/{}/tg/{}/'.format(figpath,grid,name)
if not os.path.exists(savepath): os.makedirs(savepath)
savepath2='{}png/{}/tg/{}/csv/'.format(figpath,grid,name)
if not os.path.exists(savepath2): os.makedirs(savepath2)
inpath='{}{}/tg/{}/'.format(datapath,grid,name)
filenames=glob.glob('{}tg_*_{}.nc'.format(inpath,tag))
filenames.sort()
#tg_*.nc'.format(obspath)
for i,filename in enumerate(filenames):
print('='*80)
print(i)
print(filename)
tgm = loadnc('',filename,False)
tgo = loadnc('{}east/all/'.format(obspath),'tg_{:05d}.nc'.format(tgm['tgnumber'][0]),False)
if args.dates is not None:
din=dates.datestr2num(args.dates)
figstr='{}{}_{}_tg_{:05d}_{}_to_{}.png'.format(savepath,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
figstr2='{}{}_{}_tg_{:05d}_residual_{}_to_{}.png'.format(savepath,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
figstr3='{}{}_{}_tg_{:05d}_{}_to_{}'.format(savepath2,grid,name,tgm['tgnumber'][0],args.dates[0],args.dates[1])
else:
din=np.array([tgm['time'][0]+args.skipdays,tgm['time'][-1]])
figstr='{}{}_{}_tg_{:05d}.png'.format(savepath,grid,name,tgm['tgnumber'][0])
figstr2='{}{}_{}_tg_{:05d}_residual.png'.format(savepath,grid,name,tgm['tgnumber'][0])
figstr3='{}{}_{}_tg_{:05d}'.format(savepath2,grid,name,tgm['tgnumber'][0])
idx=np.argwhere((tgo['time']>=din[0]) & (tgo['time']<=din[1]))
idx=np.ravel(idx)
time1,data1,data2=interp_clean_common(tgo['time'][idx],tgo['zeta'][idx],tgm['time'],tgm['zeta'],500,-500)
stats=residual_stats(data2-np.mean(data2), data1-np.mean(data1))
a=pd.DataFrame(stats,index=[0]).round(2).T[0]
f=plt.figure(figsize=(15,5));
ax=f.add_axes([.125,.1,.775,.8]);
ax.plot(time1,data1-np.mean(data1),'k',label='TG: {:05d}'.format(tgm['tgnumber'][0]))
ax.plot(time1,data2-np.mean(data2),'r',lw=.5,label='{}'.format(name))
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.legend()
ax.set_ylabel('Elevation (m)')
f.suptitle('Removed TG means - Obs: {} Model: {}\n Bias: {} Std: {} RMSE: {} RAE: {} Corr: {} Skew: {} Skill: {}'.format(np.mean(data1),np.mean(data2),a[0],a[1],a[2],a[3],a[4],a[5],a[6]))
f.savefig(figstr,dpi=600)
if tide:
time=np.arange(time1[0],time1[-1]+1/24.0,1/24.0)
tgm_int=ipt.interp1d(tgm['time'],tgm['zeta'],time)
tgonan=tgo['zeta'][idx]
tgonan[tgonan>500]=np.nan
tgo_int=ipt.interp1d(tgo['time'][idx],tgonan,time)
tgm_tcon_pre=ttide.t_tide(tgm_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,out_style=None)
tgo_tcon_pre=ttide.t_tide(tgo_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,out_style=None)
tgm_tcon=ttide.t_tide(tgm_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,constitnames=tgm_tcon_pre['nameu'][tgm_tcon_pre['snr']>=args.snr],out_style=None)
tgo_tcon=ttide.t_tide(tgo_int,stime=time[0],lat=tgm['lat'],dt=(time[1]-time[0])*24.0,constitnames=tgo_tcon_pre['nameu'][tgo_tcon_pre['snr']>=args.snr],out_style=None)
f=plt.figure(figsize=(15,5));
ax=f.add_axes([.125,.1,.775,.8]);
ax.plot(time[:len(tgo_tcon['xres'])],tgo_tcon['xres']-np.nanmean(tgo_tcon['xres']),'k',label='TG: {:05d}'.format(tgm['tgnumber'][0]))
ax.plot(time[:len(tgm_tcon['xres'])],tgm_tcon['xres']-np.nanmean(tgm_tcon['xres']),'r',lw=.5,label='{}'.format(name))
ax.xaxis.set_major_locator(months)
ax.xaxis.set_major_formatter(monthsFmt)
ax.legend()
ax.set_ylabel('Residual Elevation (m)')
o,m=remove_common_nan(tgo_tcon['xres']-np.nanmean(tgo_tcon['xres']), tgm_tcon['xres']-np.nanmean(tgm_tcon['xres']))
stats=residual_stats(o,m)
a=pd.DataFrame(stats,index=[0]).round(2).T[0]
f.suptitle('Removed TG means - Obs: {} Model: {}\n Bias: {} Std: {} RMSE: {} RAE: {} Corr: {} Skew: {} Skill: {}'.format(np.nanmean(tgo_tcon['xres']),np.nanmean(tgm_tcon['xres']),a[0],a[1],a[2],a[3],a[4],a[5],a[6]))
f.savefig(figstr2,dpi=600)
df=pd.DataFrame(tgm_tcon['tidecon'],columns=['Amp','AmpE','Phase','PhaseE'],index=tgm_tcon['nameu']).round(2).sort_values('Amp',ascending=False)
df.to_csv('{}_model_full.csv'.format(figstr3))
df=pd.DataFrame(tgo_tcon['tidecon'],columns=['Amp','AmpE','Phase','PhaseE'],index=tgo_tcon['nameu']).round(2).sort_values('Amp',ascending=False)
df.to_csv('{}_obs_full.csv'.format(figstr3))
namesm=tgm_tcon['nameu']
cnames=np.array([])
for namea in namesm:
if namea in tgo_tcon['nameu']:
cnames=np.append(cnames,namea)
oidx=np.in1d(tgo_tcon['nameu'],cnames)
midx=np.in1d(tgm_tcon['nameu'],cnames)
diff=np.vstack([tgo_tcon['tidecon'][oidx,0],tgm_tcon['tidecon'][midx,0],tgo_tcon['tidecon'][oidx,0]-tgm_tcon['tidecon'][midx,0],
tgo_tcon['tidecon'][oidx,2],tgm_tcon['tidecon'][midx,2],tgo_tcon['tidecon'][oidx,2]-tgm_tcon['tidecon'][midx,2]]).T
df=pd.DataFrame(diff,columns=['AmpObs','AmpMod','AmpDiff','PhaseObs','PhaseMod','PhaseDiff'],index=cnames).round(2).sort_values('AmpObs',ascending=False)
df.to_csv('{}_obsmod_common_diff.csv'.format(figstr3))
#kill
| [
"[email protected]"
] | |
4c3ab23c18f9d4491755f6abf41148a2ed42fc82 | c4702d1a06640555829b367852138cc93ba4a161 | /dym_bank_trf_request/wizard/bank_trf_advice_group_old.py | 6ef06d317a4c2cef785790f379608629ac9eeabb | [] | no_license | Rizalimami/dym | 0ecadf9c049b22ebfebf92e4eab6eaad17dd3e26 | af1bcf7b77a3212bc8a8a0e41e6042a134587ed4 | refs/heads/master | 2020-04-08T10:56:43.605698 | 2018-11-27T06:44:08 | 2018-11-27T06:44:08 | 159,287,876 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,459 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class BankTrfRequestGroup(osv.osv_memory):
_name = "bank.trf.request.group"
_description = "Bank Transfer Request Grup"
def fields_view_get(self, cr, uid, view_id=None, view_type='form',
context=None, toolbar=False, submenu=False):
"""
Changes the view dynamically
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param context: A standard dictionary
@return: New arch of view.
"""
if context is None:
context={}
res = super(BankTrfRequestGroup, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar,submenu=False)
if context.get('active_model','') == 'bank.trf.request' and len(context['active_ids']) < 2:
raise osv.except_osv(_('Warning!'),
_('Please select multiple order to merge in the list view.'))
return res
def merge_trf_requests(self, cr, uid, ids, context=None):
"""
        Merge similar bank transfer requests.
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: purchase order view
"""
trf_req_obj = self.pool.get('bank.trf.request')
# proc_obj = self.pool.get('procurement.order')
mod_obj =self.pool.get('ir.model.data')
if context is None:
context = {}
result = mod_obj._get_id(cr, uid, 'dym_bank_trf_request', 'bank_trf_request_search_view')
id = mod_obj.read(cr, uid, result, ['res_id'])
# allorders = trf_req_obj.do_merge(cr, uid, context.get('active_ids',[]), context)
        allorders = {}  # would normally come from the do_merge() call above, keyed by merged order id
return {
'domain': "[('id','in', [" + ','.join(map(str, allorders.keys())) + "])]",
'name': _('Bank Transfer Request Group'),
'view_type': 'form',
'view_mode': 'tree,form',
'res_model': 'bank.trf.request',
'view_id': False,
'type': 'ir.actions.act_window',
'search_view_id': id['res_id']
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
4b1c156a5fbd8b1083a31472220fdd8c0b7d4e3a | cc6e1cce2f0d7fa8eb16f2dc3e90d60575aeac66 | /uploader/models.py | 1671ef86d98332e6ced4177a5d9084b8f038ada0 | [] | no_license | andysitu/p_site | 84bd0fa600593a91ea9f67ca9460e0fa4b633049 | 257386bdf792ea867dbbd9905c7245695ab55a6b | refs/heads/master | 2023-06-21T16:30:21.423414 | 2019-06-26T19:21:56 | 2019-06-26T19:21:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,356 | py | from django.db import models
import django, os
from django.db.models.signals import pre_delete
from django.dispatch.dispatcher import receiver
from django.conf import settings
from django.contrib.auth.models import User
class UFileManager(models.Model):
name = models.CharField(max_length = 50)
count = models.IntegerField(default=0)
class UFile(models.Model):
filename = models.CharField(max_length=50)
uploaded_date = models.DateTimeField(default=django.utils.timezone.now)
file_manager = models.ForeignKey(UFileManager, on_delete=models.CASCADE)
file_extensions = models.CharField(max_length=10, default=".txt")
def __str__(self):
return self.filename
def get_filepath(self):
folder_name = str(self.file_manager.id)
filepath = os.path.join(settings.MEDIA_ROOT, "uploader", folder_name, str(self.id) + self.file_extensions)
return filepath
# Delete the uploaded file from disk when its UFile record is deleted.
# The sender must be UFile, since get_filepath() is defined on UFile.
@receiver(pre_delete, sender=UFile)
def delete_file(sender, instance, using, **kwargs):
    try:
        filepath = instance.get_filepath()
        os.remove(filepath)
        print("removed file")
    except FileNotFoundError:
        pass
class Note(models.Model):
text = models.TextField(max_length=200)
file_manager = models.ForeignKey(UFileManager, on_delete=models.CASCADE) | [
"[email protected]"
] | |
278010849f6c888e86cd9237c60ee0f61c668fd9 | 9bd687b5454ca7d2b4deb0e149ec7023b2f3b89e | /ebikes/lora/rfm/ll/__init__.py | 3ca2c4bfa45a8a729a51f15a13480468039889bb | [] | no_license | AlbertoFDR/EBikes-IoT | 57132ff8b059b6d2e5185e241afe7720f96b667f | cd5da02d96ccedb57a9fd3e76d4430a11fd4f4fd | refs/heads/master | 2022-12-10T14:02:22.468032 | 2020-02-11T12:37:59 | 2020-02-11T12:37:59 | 225,611,209 | 3 | 0 | null | 2021-06-02T00:45:47 | 2019-12-03T12:11:18 | Python | UTF-8 | Python | false | false | 760 | py | """
LinkLayer submodule
"""
__author__ = """Alexander Krause <[email protected]>"""
__date__ = "2016-12-28"
__version__ = "0.1.0"
__license__ = "GPL"
class Prototype:
conf = None
PL = None
def __init__(self, cfg, pl):
self.conf = cfg
self.PL = pl
self.PL.setIRQH(self._handleIRQ)
self.postInit()
def _handleIRQ(self):
pass
def get(conf, pl=None):
"""
get a new LinkLayer instance, depending on config
if a PhysicalLayer is given, it's added to the LinkLayer
"""
if conf["type"] in ["rfm9x", "rfm95", "rfm96", "rfm97", "rfm98"]:
from .ll_rfm9x import LinkLayer
else:
print("unsupported type")
return None
return LinkLayer(conf, pl)
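
# Minimal usage sketch (assumptions: `phy` is an already-initialised PhysicalLayer
# object from the matching rfm physical-layer module; the config dict only needs a
# "type" entry for this selection logic):
#
#     conf = {"type": "rfm95"}
#     link = get(conf, phy)  # returns an ll_rfm9x.LinkLayer, or None for unsupported types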
| [
"[email protected]"
] | |
0faaab6550929001312d1c69b5cd7335b0237141 | e262e64415335060868e9f7f73ab8701e3be2f7b | /test_api2/api/test_wen.py | 30802898bb6b9118e58bbed576fa44ca15d6289a | [] | no_license | Allison001/developer_test | 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 | b8e04b4b248b0c10a35e93128a5323165990052c | refs/heads/master | 2023-06-18T08:46:40.202383 | 2021-07-23T03:31:54 | 2021-07-23T03:31:54 | 322,807,303 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,273 | py | # from json import loads
# def read_line():
# with open("/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/wenjian.yaml",'r') as f:
# n = f.readlines()
# count =0
# flag = True
# for i in n:
#             i = i.strip() # strip leading/trailing whitespace from the line
# if i.startswith("#"):
# continue
# elif i == "":
# continue
# elif i[0:3] == '"""':
# continue
# elif i.startswith("'''"):
# continue
# elif i.startswith('"""') and i.endswith('"""'):
# continue
# else:
# count += 1
#
# if i == "'''" or i == '"""':
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
# else:
# count += 1
# print(count)
#
#
# read_line()
# def count_line_core(file_name): ## takes a single file, counts its lines, and returns the file's actual code line count; utf-8 vs. gbk handling still needs work
# # print('core_file_name:',file_name)
# lines_count=0
# flag=True
# # try:
# # with open(file_name,'r',encoding='gbk') as fp:
# # # print('gbk file_name:',file_name)
# # for i in fp:
# # i=i.strip()
# # if i=="'''" or i=='"""':
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
# # elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# # continue
# # elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
# # if flag==True and i!='' and not i.startswith('#'):
# # lines_count+=1
# # #print(i)
# # if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# # lines_count+=1
# # #print(i)
# # except:
# with open(file_name,'r',encoding='utf-8') as fp:
# # print('utf-8 file_name:',file_name)
# for i in fp:
# i=i.strip()
# if i=="'''" or i=='"""':
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# if flag==True and i!='' and not i.startswith('#'):
# lines_count+=1
# #print(i)
# if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# lines_count+=1
# #print(i)
# return lines_count
# def count_line_core(file_name): ## takes a single file, counts its lines, and returns the file's actual code line count; utf-8 vs. gbk handling still needs work
# # print('core_file_name:',file_name)
# lines_count=0
# flag=True
# with open(file_name,'r',encoding='utf-8') as fp:
# # print('utf-8 file_name:',file_name)
# for i in fp:
# i=i.strip()
# if i=="'''" or i=='"""':
# if flag==True:
# flag=False
# continue
# else:
# flag=True
# continue
# continue
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# # elif i.startswith("'''") or i.startswith('"""') or i.endswith("'''") or i.endswith('"""'):
# # if flag==True:
# # flag=False
# # continue
# # else:
# # flag=True
# # continue
#
# if flag==True and i!='' and not i.startswith('#'):
# lines_count+=1
# print(i)
# # if i.startswith('#-*-') or i.startswith('#coding') or i.startswith('#encoding'):
# # lines_count+=1
# # print(i)
# return lines_count
#
# print(count_line_core('/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/wenjian.yaml'))
"""
This is a function for counting lines of code.
"""
# def count_line(filename): # function name
# with open(filename) as f:
# flag = True
# count = 0
# for i in f.readlines():
# i = i.strip()
# if i == '"""' or i == "'''":
# if flag == True:
# flag = False
# continue
# else:
# flag = True
# continue
#
# elif (i.startswith("'''") and i.endswith("'''")) or (i.startswith('"""') and i.endswith('"""')):
# continue
# elif i !='' and flag == True and i[0:1] != "#":
# count+=1
# print(i)
# return count
#
# print(count_line('/Users/yeahmobi/Desktop/work/python/developer/test_api2/api/test_wen.py'))
# class Solution:
# def reverse(self, x: int) -> int:
# s = int(str(abs(x))[::-1])
# if s.bit_length() > 31:
# return 0
# else:
# if x >=0:
# return s
# else:
# return -s
# a = Solution().reverse(120)
# print(a)
# class Solution:
# def reverse(self, x: int) -> int:
# if x >=0:
# a = int(str(x)[::-1])
# else:
# a =0- int(str(x)[:0:-1])
#
#
# if (-2**31) <a < (2**31)-1:
# return a
# else:
# return 0
# class Solution:
# def solve(self , str ):
# # write code her
# str = list(str)
# print(str)
# l,r = 0,len(str)-1
# while l <=r:
# str[l],str[r] = str[r],str[l]
# l +=1
# r -=1
# return ''.join(str)
#
# a=Solution().solve('ancd')
# print(a)
# class Solution:
# def maxLength(self, arr):
# # write code here
# l, r = 0, 0
# stark = []
# n = 0
# while r < len(arr):
#
# if arr[r] in stark:
# l += 1
# r = l
# stark.clear()
#
# else:
# stark.append(arr[l])
# r += 1
# n = max(n, len(stark))
#
# return n
# class Solution:
# def maxLength(self , arr ):
# # write code here
# res=[]
# length=0
# for i in arr:
# if i not in res:
# res+=[i]
# else:
# res = res[res.index(i)+1:] + [i]
# if length<len(res): length= len(res)
# return length
# class Solution:
# def maxLength(self , arr ):
# # write code here
#         l,stark =0,[] # l stores the best (longest) window length seen so far; stark is the current non-repeating window
#         for i in arr:
#             if i in stark: # i already in the window: record its length and shrink
#                 l = max(l,len(stark))
#                 st = stark.index(i) # index of the duplicate element inside the window
#                 stark = stark[st+1:] # keep only the part of the window after the duplicate
# stark.append(i)
# return max(l,len(stark))
#
# arr = [2,2,3,4,3]
# a = Solution().maxLength(arr)
# print(a)
# stark = [1,2,3,4]
# # st = stark.index(4)
# stark = stark[1:]
# print(stark)
# "1AB2345CD","12345EF"
class Solution:
def LCS(self , str1 , str2 ):
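        # Rough greedy scan for the longest common substring: grow a prefix of str2
        # while it still occurs in str1; on a miss, drop the part of str2 scanned so far.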
# l = 0
n = []
for i in range(len(str2)):
s = str2[:i+1]
if s in str1:
# l = max(l,len(s))
if not n:
n.append(s)
if len(s) > len(n[0]):
n.append(s)
else:
str2 = str2[i:]
return n[-1]
a = Solution().LCS("1AB2345CD","12345EF")
print(a)
# a = ['ancd']
# print(len(a[0])) | [
"[email protected]"
] | |
2b6c55c91181c7e97e176e24ad5f588c767be731 | 21bc908a1612e76a32f61d3e3e8865d3025e01f3 | /backend/manage.py | aed36335605846b8d24da47b5997edc90131d876 | [] | no_license | crowdbotics-apps/damp-sky-27690 | 422acfb8a5a6df8847d31c40dfcf2dc0ce7b1b7c | c6d0d7277a35d40363155773ed2e2860d5748449 | refs/heads/master | 2023-05-08T00:48:47.975234 | 2021-06-02T22:23:20 | 2021-06-02T22:23:20 | 373,317,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'damp_sky_27690.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
02c511b60cde23f482f156867d34247a278e9f14 | 78ed388a01610359d4554efa046e473a008ba1ae | /hdlConvertorAst/translate/verilog_to_basic_hdl_sim_model.py | 0e08a141d10a01eeda1d6dc9d129bc04cf50cc7b | [
"MIT"
] | permissive | mewais/hdlConvertorAst | f9ad85cfb2804c52a1b90642f4c9cede2ce2d3e6 | 64c8c1deee923ffae17e70e0fb1ad763cb69608c | refs/heads/master | 2022-12-09T12:01:23.150348 | 2020-09-06T04:10:15 | 2020-09-06T04:15:38 | 293,200,130 | 0 | 0 | MIT | 2020-09-06T04:03:17 | 2020-09-06T04:03:17 | null | UTF-8 | Python | false | false | 2,050 | py | from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model.\
add_unique_labels_to_all_processes import AddUniqueLabelsToAllProcesses
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.verilog_types_to_basic_hdl_sim_model import VerilogTypesToBasicHdlSimModel
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.wrap_module_statements_to_processes import wrap_module_statements_to_processes
from hdlConvertorAst.translate.common.discover_declarations import DiscoverDeclarations
from hdlConvertorAst.translate.vhdl_to_verilog import link_module_dec_def
from hdlConvertorAst.translate.common.name_scope import NameScope
from hdlConvertorAst.translate.common.resolve_names import ResolveNames
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.discover_stm_outputs import discover_stm_outputs_context
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.verilog_operands_to_basic_hdl_sim_model import BasicHdlSimModelTranslateVerilogOperands
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model\
.assignment_to_update_assignment import AssignmentToUpdateAssignment
from hdlConvertorAst.translate._verilog_to_basic_hdl_sim_model.apply_io_scope_to_signal_names import ApplyIoScopeToSignalNames
def verilog_to_basic_hdl_sim_model(context):
"""
:type context: HdlContext
"""
link_module_dec_def(context)
name_scope = NameScope.make_top(False)
DiscoverDeclarations(name_scope).visit_HdlContext(context)
ResolveNames(name_scope).visit_HdlContext(context)
wrap_module_statements_to_processes(context)
BasicHdlSimModelTranslateVerilogOperands().visit_HdlContext(context)
VerilogTypesToBasicHdlSimModel().visit_HdlContext(context)
stm_outputs = discover_stm_outputs_context(context)
AddUniqueLabelsToAllProcesses(name_scope, stm_outputs).context(context)
AssignmentToUpdateAssignment().visit_HdlContext(context)
ApplyIoScopeToSignalNames().visit_HdlContext(context)
return context, stm_outputs, name_scope
| [
"[email protected]"
] | |
f48a3d35ae9058c2debe98e42131250fe2204c6d | 8af54cf9b2f7edce0b5aa4fea5117ff4dc0ae4bf | /src/urls.py | e3e531e077ad7e7dd5b2bee85a323d7d3568877d | [] | no_license | chemalle/fopag | d9a3434ce83ebf215d54be825a531763f954b5e7 | d6d89aed7bed06bb0b88cac5efd037db4ed8f6b4 | refs/heads/master | 2020-03-10T08:15:19.225631 | 2018-04-12T16:30:34 | 2018-04-12T16:30:34 | 129,281,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,135 | py | """fopag URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.contrib import admin
from django.conf.urls.static import static
from django.conf import settings
from django.contrib.auth import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'', include('folha.urls')),
url(r'^accounts/login/$', views.login, name='login'),
url(r'^accounts/logout/$', views.logout, name='logout', kwargs={'next_page': '/'}),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
] | |
889561373222e776f285c46bed462a03db1dce83 | d5f8ca3c13f681d147b7614f1902df7ba34e06f9 | /CelebA/main.py | 1a920e6f5ac9b064598be6c2ab89096536d2adde | [] | no_license | hhjung1202/OwnAdaptation | 29a6c0a603ab9233baf293096fb9e7e956647a10 | 50805730254419f090f4854387be79648a01fbb4 | refs/heads/master | 2021-06-25T22:31:15.437642 | 2020-11-26T18:19:55 | 2020-11-26T18:19:55 | 176,670,379 | 1 | 0 | null | 2020-06-11T07:35:55 | 2019-03-20T06:36:19 | Python | UTF-8 | Python | false | false | 6,344 | py | import argparse
import torch
from torch.autograd import Variable
from torchvision.utils import save_image
import numpy as np
from model import *
import os
import torch.backends.cudnn as cudnn
import time
import utils
import dataset
import math
parser = argparse.ArgumentParser(description='PyTorch Cycle Domain Adaptation Training')
parser.add_argument('--sd', default='CelebA', type=str, help='source dataset')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', help='number of data loading workers (default: 4)')
parser.add_argument('--epoch', default=164, type=int, metavar='N', help='number of total epoch to run')
parser.add_argument('--decay-epoch', default=30, type=int, metavar='N', help='epoch from which to start lr decay')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('-b', '--batch-size', default=128, type=int, metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=1e-2, type=float, metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--img-size', type=int, default=32, help='input image width, height size')
parser.add_argument('--dir', default='./', type=str, help='default save directory')
parser.add_argument('--gpu', default='0', type=str, help='Multi GPU ids to use.')
best_prec_result = torch.tensor(0, dtype=torch.float32)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
torch.manual_seed(args.seed)
cuda = True if torch.cuda.is_available() else False
FloatTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
LongTensor = torch.cuda.LongTensor if cuda else torch.LongTensor
criterion = torch.nn.CrossEntropyLoss()
def main():
global args, best_prec_result
start_epoch = 0
utils.default_model_dir = args.dir
start_time = time.time()
train_loader, test_loader = dataset_selector(args.sd)
state_info = utils.model_optim_state_info()
state_info.model_init(args=args, num_class=4000)
state_info.model_cuda_init()
state_info.weight_init()
state_info.optimizer_init(args)
if cuda:
print("USE", torch.cuda.device_count(), "GPUs!")
cudnn.benchmark = True
checkpoint = utils.load_checkpoint(utils.default_model_dir, is_last=True)
if checkpoint:
start_epoch = checkpoint['epoch'] + 1
best_prec_result = checkpoint['Best_Prec']
state_info.load_state_dict(checkpoint)
for epoch in range(0, args.epoch):
if epoch < 80:
lr = args.lr
elif epoch < 122:
lr = args.lr * 0.1
else:
lr = args.lr * 0.01
for param_group in state_info.optimizer.param_groups:
param_group['lr'] = lr
train(state_info, train_loader, epoch)
prec_result = test(state_info, test_loader, epoch)
if prec_result > best_prec_result:
best_prec_result = prec_result
filename = 'checkpoint_best.pth.tar'
utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)
utils.print_log('Best Prec : {:.4f}'.format(best_prec_result.item()))
filename = 'latest.pth.tar'
utils.save_state_checkpoint(state_info, best_prec_result, filename, utils.default_model_dir, epoch)
now = time.gmtime(time.time() - start_time)
utils.print_log('Best Prec : {:.4f}'.format(best_prec_result.item()))
utils.print_log('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
print('done')
def train(state_info, train_loader, epoch): # all
utils.print_log('Type, Epoch, Batch, loss, total_loss, Percent')
state_info.set_train_mode()
correct = torch.tensor(0, dtype=torch.float32)
total = torch.tensor(0, dtype=torch.float32)
train_loss = 0
for it, [x, y] in enumerate(train_loader):
x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
output = state_info.forward(x)
# Train
state_info.optimizer.zero_grad()
loss = criterion(output, y)
loss.backward()
state_info.optimizer.step()
# Log Print
train_loss += loss.data.item()
total += float(y.size(0))
_, predicted = torch.max(output.data, 1)
correct += float(predicted.eq(y.data).cpu().sum())
if it % 10 == 0:
utils.print_log('Train, {}, {}, {:.6f}, {:.4f}, {:.2f}'
.format(epoch, it, loss.item(), train_loss, 100.*correct / total))
print('Train, {}, {}, {:.6f}, {:.4f}, {:.2f}'
.format(epoch, it, loss.item(), train_loss, 100.*correct / total))
utils.print_log('')
def test(state_info, test_loader, epoch):
utils.print_log('Type, Epoch, Acc')
state_info.set_test_mode()
correct = torch.tensor(0, dtype=torch.float32)
total = torch.tensor(0, dtype=torch.float32)
for it, [x, y] in enumerate(test_loader):
x, y = to_var(x, FloatTensor), to_var(y, LongTensor)
output = state_info.forward(x)
# Log Print
total += float(y.size(0))
_, predicted = torch.max(output.data, 1)
correct += float(predicted.eq(y.data).cpu().sum())
utils.print_log('Test, {}, {:.2f}'.format(epoch, 100.*correct / total))
print('Test, {}, {:.2f}'.format(epoch, 100.*correct / total))
utils.print_log('')
return 100.*correct / total
def dataset_selector(data):
if data == 'mnist':
return dataset.MNIST_loader(img_size=args.img_size)
elif data == 'svhn':
return dataset.SVHN_loader(img_size=32)
elif data == "usps":
return dataset.usps_loader(img_size=args.img_size)
elif data == "mnistm":
return dataset.MNIST_M_loader(img_size=args.img_size)
elif data == "cifar10":
return dataset.cifar10_loader(args)
elif data == "CelebA":
return dataset.CelebA_loader(image_size=args.img_size, batch_size=args.batch_size)
def to_var(x, dtype):
return Variable(x.type(dtype))
if __name__=='__main__':
main() | [
"[email protected]"
] | |
d0dd8c0f79d16b37610b0f645641720c3a87dc5b | 347c70d4851b568e03e83387f77ae81071ab739e | /fn_splunk_integration/tests/test_function_utils.py | 0c9e09a2c07c9c3e0f49d16aed5e0ed0666a3c55 | [
"MIT"
] | permissive | neetinkandhare/resilient-community-apps | 59d276b5fb7a92872143ce2b94edd680738693ce | 3ecdabe6bf2fc08f0f8e58cbe92553270d8da42f | refs/heads/master | 2021-12-27T09:05:36.563404 | 2021-09-29T13:04:56 | 2021-09-29T13:04:56 | 159,804,866 | 1 | 0 | MIT | 2021-08-03T19:45:45 | 2018-11-30T10:07:32 | Python | UTF-8 | Python | false | false | 1,628 | py |
#
# Unit tests for fn_splunk_integration/components/function_utils.py
#
# 100% code coverage
#
#
import unittest
import sys
sys.path.append("../fn_splunk_integration/util")
sys.path.append("fn_splunk_integration/util")
from function_utils import make_query_string
from function_utils import make_item_dict
from function_utils import ItemDataError
def test_query_string():
print("Testing query string substitution....")
input_string = "index = %param1% source=%param2% AND %param3%=%param4%"
params = ["_internal", "*splunkd*", "clientip", "127.0.0.1"]
query = make_query_string(input_string, params)
assert query == "index = _internal source=*splunkd* AND clientip=127.0.0.1"
def test_make_item_dict():
print("Testing make_item_dict")
params = ["field1", "value1",
"field2", "value2",
"field3", "value3"]
item_dict = make_item_dict(params)
assert item_dict["field1"] == "value1" and item_dict["field2"] == "value2" and item_dict["field3"] == "value3"
# Test wrong number of params
try:
make_item_dict(["p1","p2","p3"])
assert False
except ItemDataError:
assert True
# Test null key
try:
item_dict = make_item_dict(["p1", "p2",
None, "p4",
"p5", "p6"])
assert item_dict["p1"] == "p2" and item_dict["p5"] == "p6"
assert "p4" not in item_dict
except ItemDataError:
assert False
# Test null value
try:
item_dict = make_item_dict(["p1", None])
assert not item_dict["p1"]
except:
assert False
| [
"[email protected]"
] | |
d00dbd97b58fc1d1199f2fc36746e9223ddfeea0 | 39b0d9c6df77671f540c619aff170441f953202a | /default program/descriptor_method1.py | 18d5b5dc9831d6210a3cfa6fd591f3a965cd7de1 | [] | no_license | yeboahd24/Python201 | e7d65333f343d9978efff6bf86ce0447d3a40d70 | 484e66a52d4e706b8478473347732e23998c93c5 | refs/heads/main | 2023-02-06T10:24:25.429718 | 2020-12-26T01:08:04 | 2020-12-26T01:08:04 | 306,487,550 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | #!usr/bin/env/python3
class DescriptorClass(object):
"""All know that descriptor attributes should be in the class not the __init__
instance--> this is the instance of your class, so in this case test and test1 becomes our instance
owner--> this the name of class of the instance, ClientClass is now our owner here
"""
def __get__(self, instance, owner):
if instance is None: # don't forget to add this
return f"{self.__class__.__name__}.{owner.__name__}"
return f"value for {instance}"
class ClientClass(object):
descriptor = DescriptorClass()
test = ClientClass.descriptor # calling ClientClass directly
test1 = ClientClass().descriptor
print(test)
print(test1)
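# Expected output of the two prints above:
#   DescriptorClass.ClientClass                        (class access: instance is None)
#   value for <__main__.ClientClass object at 0x...>   (instance access)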
| [
"[email protected]"
] | |
c5000324a37133b8e3e2bad62736b29664f711fd | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03659/s495769033.py | 54ee8453e730f35341ffac0335267d937fc39396 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 224 | py | import numpy as np
n = int(input())
a = np.array(list(map(int, input().split())))
cumsum_a = a.cumsum()
sum_a = cumsum_a[-1]
ans = 2 * 10**9
for i in range(n-1):
ans = min(ans, abs(sum_a - 2*cumsum_a[i]))
print(ans)
| [
"[email protected]"
] | |
7e3abe5ff2836f61260cff4e091e0e15a6e5aa06 | 0966fc5e479f7dd86683fd2d961e44bb4f71a614 | /splatify/views.py | 8b506aeb27b9322d1943be7e2675565ce5510105 | [] | no_license | micnem/splatify2 | 112972616f6216598791df6b025c2de7be020281 | a90328fbf79667ebe10a028a66c49334c840ae57 | refs/heads/main | 2023-02-10T11:20:48.570326 | 2021-01-06T14:14:08 | 2021-01-06T14:14:08 | 327,318,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 891 | py | from django.shortcuts import render
from .spopulate import get_top_artists, create_playlist, match, main
from .models import *
def check_profile(profile):
if not profile.populated:
get_top_artists(profile)
def homepage(request):
return render(request, 'homepage.html')
def room(request):
check_profile(request.user.profile)
users = User.objects.all()
return render(request, 'room.html', {'users': users})
def show_top_artists(request):
return render(request,'top_artists.html')
def splat(request, user_id):
user2 = User.objects.get(id=user_id)
master_list = match([request.user, user2])
playlist_id = main(master_list, request.user.profile, user2)
return render(request, 'result.html', {'playlist_id':playlist_id})
def play(request, playlist_id):
return render(request, 'play.html', {'playlist_id':playlist_id}) | [
"[email protected]"
] | |
87cb6e6e0b682d52f6eaaa096b0a13f7c53bb789 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/8/udw.py | 95de41ebfb832ef789c650110a4ac9b22d6e3fbf | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'uDW':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
95de76dbf85e358fc7d4c5589e293bd48b8d7d27 | b148cda05d07895b97f5dbc29d06999ffb4d1b33 | /sonic-pcied/tests/test_DaemonPcied.py | 2c3c953e7e483aaec37fc1251ee3d54cd23d1fbc | [
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] | permissive | keboliu/sonic-platform-daemons | 0fb6ce76902ec0b6942cd3c1356c7586dacb8d58 | c7cbbb8db5b74d3eddcedd733000d5232006911e | refs/heads/master | 2023-08-31T00:05:25.760285 | 2022-08-09T20:13:59 | 2022-08-09T20:13:59 | 139,558,345 | 0 | 1 | NOASSERTION | 2020-03-26T10:19:47 | 2018-07-03T09:15:09 | Python | UTF-8 | Python | false | false | 8,620 | py | import datetime
import os
import sys
from imp import load_source # Replace with importlib once we no longer need to support Python 2
import pytest
# TODO: Clean this up once we no longer need to support Python 2
if sys.version_info.major == 3:
from unittest import mock
else:
import mock
from .mock_platform import MockPcieUtil
SYSLOG_IDENTIFIER = 'pcied_test'
NOT_AVAILABLE = 'N/A'
tests_path = os.path.dirname(os.path.abspath(__file__))
# Add mocked_libs path so that the file under test can load mocked modules from there
mocked_libs_path = os.path.join(tests_path, "mocked_libs")
sys.path.insert(0, mocked_libs_path)
from sonic_py_common import daemon_base
daemon_base.db_connect = mock.MagicMock()
# Add path to the file under test so that we can load it
modules_path = os.path.dirname(tests_path)
scripts_path = os.path.join(modules_path, "scripts")
sys.path.insert(0, modules_path)
load_source('pcied', os.path.join(scripts_path, 'pcied'))
import pcied
pcie_no_aer_stats = \
"""
{'correctable': {}, 'fatal': {}, 'non_fatal': {}}
"""
pcie_aer_stats_no_err = {'correctable': {'field1': '0', 'field2': '0'},
'fatal': {'field3': '0', 'field4': '0'},
'non_fatal': {'field5': '0', 'field6': '0'}}
pcie_aer_stats_err = \
"""
{'correctable': {'field1': '1', 'field2': '0'},
'fatal': {'field3': '0', 'field4': '1'},
'non_fatal': {'field5': '0', 'field6': '1'}}
"""
pcie_device_list = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f13', 'name': 'PCI C'}]
"""
pcie_check_result_no = []
pcie_check_result_pass = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A', 'result': 'Passed'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B', 'result': 'Passed'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f12', 'name': 'PCI C', 'result': 'Passed'}]
"""
pcie_check_result_fail = \
"""
[{'bus': '00', 'dev': '01', 'fn': '0', 'id': '1f10', 'name': 'PCI A', 'result': 'Passed'},
{'bus': '00', 'dev': '02', 'fn': '0', 'id': '1f11', 'name': 'PCI B', 'result': 'Passed'},
{'bus': '00', 'dev': '03', 'fn': '0', 'id': '1f12', 'name': 'PCI C', 'result': 'Failed'}]
"""
class TestDaemonPcied(object):
"""
Test cases to cover functionality in DaemonPcied class
"""
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
def test_signal_handler(self):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.stop_event.set = mock.MagicMock()
daemon_pcied.log_info = mock.MagicMock()
daemon_pcied.log_warning = mock.MagicMock()
# Test SIGHUP
daemon_pcied.signal_handler(pcied.signal.SIGHUP, None)
assert daemon_pcied.log_info.call_count == 1
daemon_pcied.log_info.assert_called_with("Caught signal 'SIGHUP' - ignoring...")
assert daemon_pcied.log_warning.call_count == 0
assert daemon_pcied.stop_event.set.call_count == 0
assert pcied.exit_code == 0
# Reset
daemon_pcied.log_info.reset_mock()
daemon_pcied.log_warning.reset_mock()
daemon_pcied.stop_event.set.reset_mock()
# Test SIGINT
test_signal = pcied.signal.SIGINT
daemon_pcied.signal_handler(test_signal, None)
assert daemon_pcied.log_info.call_count == 1
daemon_pcied.log_info.assert_called_with("Caught signal 'SIGINT' - exiting...")
assert daemon_pcied.log_warning.call_count == 0
assert daemon_pcied.stop_event.set.call_count == 1
assert pcied.exit_code == (128 + test_signal)
# Reset
daemon_pcied.log_info.reset_mock()
daemon_pcied.log_warning.reset_mock()
daemon_pcied.stop_event.set.reset_mock()
# Test SIGTERM
test_signal = pcied.signal.SIGTERM
daemon_pcied.signal_handler(test_signal, None)
assert daemon_pcied.log_info.call_count == 1
daemon_pcied.log_info.assert_called_with("Caught signal 'SIGTERM' - exiting...")
assert daemon_pcied.log_warning.call_count == 0
assert daemon_pcied.stop_event.set.call_count == 1
assert pcied.exit_code == (128 + test_signal)
# Reset
daemon_pcied.log_info.reset_mock()
daemon_pcied.log_warning.reset_mock()
daemon_pcied.stop_event.set.reset_mock()
pcied.exit_code = 0
# Test an unhandled signal
daemon_pcied.signal_handler(pcied.signal.SIGUSR1, None)
assert daemon_pcied.log_warning.call_count == 1
daemon_pcied.log_warning.assert_called_with("Caught unhandled signal 'SIGUSR1' - ignoring...")
assert daemon_pcied.log_info.call_count == 0
assert daemon_pcied.stop_event.set.call_count == 0
assert pcied.exit_code == 0
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
def test_run(self):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.check_pcie_devices = mock.MagicMock()
daemon_pcied.run()
assert daemon_pcied.check_pcie_devices.call_count == 1
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
def test_check_pcie_devices(self):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.update_pcie_devices_status_db = mock.MagicMock()
daemon_pcied.check_n_update_pcie_aer_stats = mock.MagicMock()
pcied.platform_pcieutil.get_pcie_check = mock.MagicMock()
daemon_pcied.check_pcie_devices()
assert daemon_pcied.update_pcie_devices_status_db.call_count == 1
assert daemon_pcied.check_n_update_pcie_aer_stats.call_count == 0
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
def test_update_pcie_devices_status_db(self):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.status_table = mock.MagicMock()
daemon_pcied.log_info = mock.MagicMock()
daemon_pcied.log_error = mock.MagicMock()
# test for pass resultInfo
daemon_pcied.update_pcie_devices_status_db(0)
assert daemon_pcied.status_table.set.call_count == 1
assert daemon_pcied.log_info.call_count == 1
assert daemon_pcied.log_error.call_count == 0
daemon_pcied.status_table.set.reset_mock()
daemon_pcied.log_info.reset_mock()
# test for resultInfo with 1 device failed to detect
daemon_pcied.update_pcie_devices_status_db(1)
assert daemon_pcied.status_table.set.call_count == 1
assert daemon_pcied.log_info.call_count == 0
assert daemon_pcied.log_error.call_count == 1
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
@mock.patch('pcied.read_id_file')
def test_check_n_update_pcie_aer_stats(self, mock_read):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.device_table = mock.MagicMock()
daemon_pcied.update_aer_to_statedb = mock.MagicMock()
pcied.platform_pcieutil.get_pcie_aer_stats = mock.MagicMock()
mock_read.return_value = None
daemon_pcied.check_n_update_pcie_aer_stats(0,1,0)
assert daemon_pcied.update_aer_to_statedb.call_count == 0
assert daemon_pcied.device_table.set.call_count == 0
assert pcied.platform_pcieutil.get_pcie_aer_stats.call_count == 0
mock_read.return_value = '1714'
daemon_pcied.check_n_update_pcie_aer_stats(0,1,0)
assert daemon_pcied.update_aer_to_statedb.call_count == 1
assert daemon_pcied.device_table.set.call_count == 1
assert pcied.platform_pcieutil.get_pcie_aer_stats.call_count == 1
@mock.patch('pcied.load_platform_pcieutil', mock.MagicMock())
def test_update_aer_to_statedb(self):
daemon_pcied = pcied.DaemonPcied(SYSLOG_IDENTIFIER)
daemon_pcied.log_debug = mock.MagicMock()
daemon_pcied.device_table = mock.MagicMock()
daemon_pcied.device_name = mock.MagicMock()
daemon_pcied.aer_stats = pcie_aer_stats_no_err
"""
mocked_expected_fvp = pcied.swsscommon.FieldValuePairs(
[("correctable|field1", '0'),
("correctable|field2", '0'),
("fatal|field3", '0'),
("fatal|field4", '0'),
("non_fatal|field5", '0'),
("non_fatal|field6", '0'),
])
"""
daemon_pcied.update_aer_to_statedb()
assert daemon_pcied.log_debug.call_count == 0
assert daemon_pcied.device_table.set.call_count == 1
daemon_pcied.device_table.set.reset_mock()
| [
"[email protected]"
] | |
38739ea4cae572570555cd1043b6acf10436f45e | 3eb4d64a8bb0bc240a2ef189724f4d51b5275eac | /heltour/tournament/migrations/0099_alternate_priority_date_override.py | 863952e8d18f8da8a170a0aae4967d562e598879 | [
"MIT"
] | permissive | brucemubayiwa/heltour | c01cc88be7f86dce8246f619d7aa2da37e0e0ac2 | fa4e9b06343acaf6a8a99337860e1ad433e68f6b | refs/heads/master | 2021-01-23T19:59:04.099215 | 2017-09-06T03:34:31 | 2017-09-06T03:34:31 | 102,840,526 | 1 | 0 | null | 2017-09-08T08:53:30 | 2017-09-08T08:53:30 | null | UTF-8 | Python | false | false | 482 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-09-19 01:15
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('tournament', '0098_auto_20160916_1934'),
]
operations = [
migrations.AddField(
model_name='alternate',
name='priority_date_override',
field=models.DateTimeField(blank=True, null=True),
),
]
| [
"[email protected]"
] | |
c8e99972a246a077b466f45e66c23b688c79d040 | ea373d1b4296d16eaa1355972cccd28eaa336871 | /login-signup-Django/signup/views.py | 1ea7905bc7574d9d41102a129e6dab3e08283977 | [] | no_license | nazaninsbr/Web-Development | f1a03e3d26d79dda8a6f9978d443a62cc5b88b42 | 7821ec2596d1dff7c4f390e01ae7d90e3fdbf029 | refs/heads/master | 2021-05-02T16:05:09.508344 | 2018-04-27T18:20:01 | 2018-04-27T18:20:01 | 120,666,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,656 | py | from django.contrib.auth import login, authenticate
# from django.http import HttpResponse, JsonResponse
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from signup.serializers import SignupSerializer
import json
from rest_framework.parsers import JSONParser
from django.views.decorators.csrf import csrf_exempt
from rest_framework.authtoken.models import Token
from django.contrib.auth.hashers import make_password
from django.http import HttpResponseRedirect, HttpResponse, JsonResponse
from django.urls import reverse
# @login_required
# def home(request):
# return render(request, 'signup/home.html')
import logging
logger = logging.getLogger(__name__)
@csrf_exempt
def signup(request):
if request.method == 'GET':
response_data = {}
response_data['result'] = 'error'
response_data['message'] = 'You need to post something'
return HttpResponse(json.dumps(response_data), content_type="application/json")
if request.method == 'POST':
signupdata = JSONParser().parse(request)
serializer = SignupSerializer(data = signupdata)
if serializer.is_valid():
# serializer.save()
jsonfile = serializer.data
username = jsonfile["username"]
password = jsonfile["password"]
logger.info(username)
logger.info(password)
password = make_password(password, '1')
user = User(username=username, password=password)
user.save()
new_token = Token.objects.create(user=user)
new_token.save()
request.session["SoCkey"]=new_token.key
request.session.set_expiry(30000000)
login(request, user)
return JsonResponse({"key":new_token.key})
else:
return JsonResponse(serializer.errors)
# username = signupdata.cleaned_data.get('username')
# raw_password = signupdata.cleaned_data.get('password1')
# user = authenticate(username=username, password=raw_password)
# form = Signup(request.POST)
# if form.is_valid():
# form.save()
# username = form.cleaned_data.get('username')
# raw_password = form.cleaned_data.get('password1')
# user = authenticate(username=username, password=raw_password)
# login(request, user)
# return redirect('home')
# else:
# form = SignUpForm()
# return render(request, 'signup/signup.html', {'form': form})
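# --- Illustrative usage sketch (added note, not part of the original app) ---
# A client would POST JSON credentials to this view and receive a token back.
# Assuming the view is routed at /signup/ in the project's urls.py (not shown
# here), a request could look like:
#
#   import requests
#   resp = requests.post("http://localhost:8000/signup/",
#                        json={"username": "alice", "password": "s3cret"})
#   print(resp.json())   # e.g. {"key": "<auth token>"}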
| [
"[email protected]"
] | |
34a1c0235615920c69d66c20f7774fba3f391aa2 | eb9f655206c43c12b497c667ba56a0d358b6bc3a | /python/testData/quickFixes/PyPandasSeriesToListQuickFixTest/dataframeGetitem.py | ee17a810d951dd9ec0fdaef3088c7dab1cfb67d5 | [
"Apache-2.0"
] | permissive | JetBrains/intellij-community | 2ed226e200ecc17c037dcddd4a006de56cd43941 | 05dbd4575d01a213f3f4d69aa4968473f2536142 | refs/heads/master | 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 | Apache-2.0 | 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null | UTF-8 | Python | false | false | 351 | py | import pandas as pd
# DataFrame columns case
df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
list(df[['a', 'b']].values)
bb = ["a", "b", "c"]
list(df[bb].values)
# with errors
list(df.<error descr="Name expected">[</error>'a'].values)
<warning descr="Method Series.to_list() is recommended">list<caret>(df['a'].values)</warning> | [
"[email protected]"
] | |
f160cd861be4861d18ff058c4fe05ae1b02b5b5b | 69e318f2b60175108bc74ee669bfe16287a71cb6 | /plugins/modules/fortios_log_null_device_filter.py | 298792656963599543d5265bb94c284a6f6c4b5c | [] | no_license | chillancezen/ansible-galaxy-fortios-collection | 5268a5fd97fb4594772349b8d89cb818ec54b3bd | 66a331cd4493d1b0f49798d5c2cd6ef5aeba84d3 | refs/heads/master | 2022-04-09T19:20:59.073193 | 2020-03-26T07:17:09 | 2020-03-26T07:17:09 | 250,185,374 | 0 | 0 | null | 2020-03-26T07:06:16 | 2020-03-26T07:06:16 | null | UTF-8 | Python | false | false | 13,861 | py | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_log_null_device_filter
short_description: Filters for null device logging in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify log_null_device feature and filter category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
log_null_device_filter:
description:
- Filters for null device logging.
default: null
type: dict
suboptions:
anomaly:
description:
- Enable/disable anomaly logging.
type: str
choices:
- enable
- disable
dns:
description:
- Enable/disable detailed DNS event logging.
type: str
choices:
- enable
- disable
filter:
description:
- Null-device log filter.
type: str
filter_type:
description:
- Include/exclude logs that match the filter.
type: str
choices:
- include
- exclude
forward_traffic:
description:
- Enable/disable forward traffic logging.
type: str
choices:
- enable
- disable
gtp:
description:
- Enable/disable GTP messages logging.
type: str
choices:
- enable
- disable
local_traffic:
description:
- Enable/disable local in or out traffic logging.
type: str
choices:
- enable
- disable
multicast_traffic:
description:
- Enable/disable multicast traffic logging.
type: str
choices:
- enable
- disable
netscan_discovery:
description:
- Enable/disable netscan discovery event logging.
type: str
netscan_vulnerability:
description:
- Enable/disable netscan vulnerability event logging.
type: str
severity:
description:
- Lowest severity level to log.
type: str
choices:
- emergency
- alert
- critical
- error
- warning
- notification
- information
- debug
sniffer_traffic:
description:
- Enable/disable sniffer traffic logging.
type: str
choices:
- enable
- disable
ssh:
description:
- Enable/disable SSH logging.
type: str
choices:
- enable
- disable
voip:
description:
- Enable/disable VoIP logging.
type: str
choices:
- enable
- disable
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Filters for null device logging.
fortios_log_null_device_filter:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
log_null_device_filter:
anomaly: "enable"
dns: "enable"
filter: "<your_own_value>"
filter_type: "include"
forward_traffic: "enable"
gtp: "enable"
local_traffic: "enable"
multicast_traffic: "enable"
netscan_discovery: "<your_own_value>"
netscan_vulnerability: "<your_own_value>"
severity: "emergency"
sniffer_traffic: "enable"
ssh: "enable"
voip: "enable"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortios.plugins.module_utils.fortios.fortios import FortiOSHandler
from ansible_collections.fortinet.fortios.plugins.module_utils.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_log_null_device_filter_data(json):
option_list = ['anomaly', 'dns', 'filter',
'filter_type', 'forward_traffic', 'gtp',
'local_traffic', 'multicast_traffic', 'netscan_discovery',
'netscan_vulnerability', 'severity', 'sniffer_traffic',
'ssh', 'voip']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
def log_null_device_filter(data, fos):
vdom = data['vdom']
log_null_device_filter_data = data['log_null_device_filter']
filtered_data = underscore_to_hyphen(filter_log_null_device_filter_data(log_null_device_filter_data))
return fos.set('log.null-device',
'filter',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_log_null_device(data, fos):
if data['log_null_device_filter']:
resp = log_null_device_filter(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"log_null_device_filter": {
"required": False, "type": "dict", "default": None,
"options": {
"anomaly": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"dns": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"filter": {"required": False, "type": "str"},
"filter_type": {"required": False, "type": "str",
"choices": ["include", "exclude"]},
"forward_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"gtp": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"local_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"multicast_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"netscan_discovery": {"required": False, "type": "str"},
"netscan_vulnerability": {"required": False, "type": "str"},
"severity": {"required": False, "type": "str",
"choices": ["emergency", "alert", "critical",
"error", "warning", "notification",
"information", "debug"]},
"sniffer_traffic": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"ssh": {"required": False, "type": "str",
"choices": ["enable", "disable"]},
"voip": {"required": False, "type": "str",
"choices": ["enable", "disable"]}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_log_null_device(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_log_null_device(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
ca6b3166f393338dabec04bc58f53131b6d65b8a | 177b66facda74108e693d0fe4e0be1cd8b3adc79 | /cell/test data.py | f552320e7e0631afc676614ecd295e8330064807 | [] | no_license | leizeling/my_learn | 04c0266adc319f5679c6db17ad4681a448def5eb | 3be0446d1a9e2d301d58f455261763231f1aa7d6 | refs/heads/master | 2020-03-19T04:12:32.196213 | 2018-06-07T14:51:39 | 2018-06-07T14:51:39 | 135,805,333 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,938 | py | # _*_ conding:utf-8 _*_
from __future__ import print_function
import os
import numpy as np
from skimage.io import imsave, imread
data_path = '/home/momoh/mabocombinedimgs22/'
image_rows = 420
image_cols = 580
def create_test_data2():
train_data_path = os.path.join(data_path, 'test')
    images = os.listdir(train_data_path)  # list of file names in the folder
    total = len(images) // 2  # each sample is an image/mask pair, so half the files
    imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)  # shape: (n_samples, rows, cols)
imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.jpg'
img = imread(os.path.join(train_data_path, image_name)) #(width,height,channel)
img_mask = imread(os.path.join(train_data_path, image_mask_name))
img =img[:,:,1] #(width,height)
img_mask=img_mask[:,:,1]
img = np.array([img]) #(1,width,height)
img_mask = np.array([img_mask])
imgs[i] = img #(i,1,width,height)
imgs_mask[i] = img_mask
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print(total)
np.save('imgs_test.npy', imgs)
np.save('imgs_mask_test.npy', imgs_mask)
print('Saving to .npy files done.')
def create_train_data():
train_data_path = os.path.join(data_path, 'train')
images = os.listdir(train_data_path)
    total = len(images) // 2  # each sample is an image/mask pair, so half the files
imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
imgs_mask = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
i = 0
print('-'*30)
print('Creating training images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
image_mask_name = image_name.split('.')[0] + '_mask.jpg'
img = imread(os.path.join(train_data_path, image_name))
img_mask = imread(os.path.join(train_data_path, image_mask_name))
img =img[:,:,1]
img_mask=img_mask[:,:,1]
img = np.array([img])
img_mask = np.array([img_mask])
imgs[i] = img
imgs_mask[i] = img_mask
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print(total)
np.save('imgs_train.npy', imgs)
np.save('imgs_mask_train.npy', imgs_mask)
print('Saving to .npy files done.')
def load_train_data():
imgs_train = np.load('imgs_train.npy')
imgs_mask_train = np.load('imgs_mask_train.npy')
return imgs_train, imgs_mask_train
def create_test_data():
train_data_path = os.path.join(data_path, 'test')
images = os.listdir(train_data_path)
    total = len(images) // 2  # each sample is an image/mask pair, so half the files
imgs = np.ndarray((total, image_rows, image_cols), dtype=np.uint8)
imgs_id = np.ndarray((total, ), dtype=np.int32)
i = 0
print('-'*30)
print('Creating test images...')
print('-'*30)
for image_name in images:
if 'mask' in image_name:
continue
img_id = int(image_name.split('.')[0])#image_name
img = imread(os.path.join(train_data_path, image_name))
img =img[:,:,1]
img = np.array([img])
imgs[i] = img
imgs_id[i] = img_id
if i % 100 == 0:
print('Done: {0}/{1} images'.format(i, total))
i += 1
print('Loading done.')
np.save('imgs_test.npy', imgs)
np.save('imgs_id_test.npy', imgs_id)
print('Saving to .npy files done.')
def load_test_data():
imgs_test = np.load('imgs_test.npy')
imgs_mask_test = np.load('imgs_mask_test.npy')
imgs_id = np.load('imgs_id_test.npy')
return imgs_test, imgs_id,imgs_mask_test
if __name__ == '__main__':
#create_train_data()
create_test_data()
create_test_data2()
| [
"[email protected]"
] | |
c9b7f903cf66a083d05e34ebc1900c3906a73400 | 9c50f57a9cb32b44e86a0cdcbf61ead34754b085 | /杂物间/PycharmProjects/面向对象基础/bc_08_案例.py | a4de3dd88e831cda6088324ea0cfb9c0c0d834f7 | [] | no_license | a1403893559/rg201python | c3f115011981393c86a0150e5281096651712ad4 | 448f04c86e4c7fd30e3a2a4f9121b934ae1d49be | refs/heads/master | 2020-03-15T23:32:17.723403 | 2018-03-18T12:59:43 | 2018-03-18T12:59:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 673 | py | class Person:
"""人类"""
    def __init__(self, name, weight):
        # self.attribute = constructor parameter
        self.name = name
        self.weight = weight
    def __str__(self):
        # __str__ must return a string
        return "My name is %s and I weigh %.2f kg" % (self.name, self.weight)
    def run(self):
        """Go for a run."""
        print("%s loves running; running keeps the body fit" % self.name)
        self.weight -= 0.5
    def eat(self):
        """Eat something."""
        print("%s is a foodie; the diet can start after this meal" % self.name)
        self.weight += 1
xiaoming = Person("Xiaoming", 75)
xiaoming.run()
xiaoming.eat()
xiaoming.eat()
print(xiaoming)
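# Added note (not in the original file): after one run() and two eat() calls the
# weight is 75 - 0.5 + 1 + 1 = 76.5, so the final print shows roughly
# "My name is Xiaoming and I weigh 76.50 kg".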
| [
"[email protected]"
] | |
087dc9ae865acae60ac24c9dfbd921703d209bdc | 6174de8df820463515c63425700eab7af643bb31 | /src/test_emb.py | eda27ed664cdbaef38b2a7a846cf3cb434713eec | [] | no_license | danielzgsilva/CL-MOT | 1cd9b5f2f06454dd7c35a3e2906ad2883ea83495 | 3b5b812788a34728d7b7484b10ae9434313380fe | refs/heads/master | 2022-12-05T18:45:36.805047 | 2020-08-27T22:01:34 | 2020-08-27T22:01:34 | 272,636,268 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,882 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import argparse
import torch
import json
import time
import os
import cv2
import math
from sklearn import metrics
from scipy import interpolate
import numpy as np
from torchvision.transforms import transforms as T
import torch.nn.functional as F
from models.model import create_model, load_model
from datasets.dataset.jde import JointDataset, collate_fn
from models.utils import _tranpose_and_gather_feat
from utils.utils import xywh2xyxy, ap_per_class, bbox_iou
from opts import opts
from models.decode import mot_decode
from utils.post_process import ctdet_post_process
def test_emb(
opt,
batch_size=16,
img_size=(1088, 608),
print_interval=40,
):
data_cfg = opt.data_cfg
f = open(data_cfg)
data_cfg_dict = json.load(f)
f.close()
nC = 1
test_paths = data_cfg_dict['test_emb']
dataset_root = data_cfg_dict['root']
if opt.gpus[0] >= 0:
opt.device = torch.device('cuda')
else:
opt.device = torch.device('cpu')
print('Creating model...')
model = create_model(opt.arch, opt.heads, opt.head_conv, opt)
model = load_model(model, opt.load_model)
# model = torch.nn.DataParallel(model)
model = model.to(opt.device)
model.eval()
# Get dataloader
transforms = T.Compose([T.ToTensor()])
dataset = JointDataset(opt, dataset_root, test_paths, img_size, augment=False, transforms=transforms)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, shuffle=False,
num_workers=8, drop_last=False)
embedding, id_labels = [], []
print('Extracting pedestrain features...')
for batch_i, batch in enumerate(dataloader):
t = time.time()
output = model(batch['img'].cuda())[-1]
id_head = _tranpose_and_gather_feat(output['id'], batch['ind'].cuda())
id_head = id_head[batch['reg_mask'].cuda() > 0].contiguous()
emb_scale = math.sqrt(2) * math.log(opt.nID - 1)
id_head = emb_scale * F.normalize(id_head)
id_target = batch['ids'].cuda()[batch['reg_mask'].cuda() > 0]
for i in range(0, id_head.shape[0]):
if len(id_head.shape) == 0:
continue
else:
feat, label = id_head[i], id_target[i].long()
if label != -1:
embedding.append(feat)
id_labels.append(label)
if batch_i % print_interval == 0:
print(
'Extracting {}/{}, # of instances {}, time {:.2f} sec.'.format(batch_i, len(dataloader), len(id_labels),
time.time() - t))
print('Computing pairwise similairity...')
if len(embedding) < 1:
return None
embedding = torch.stack(embedding, dim=0).cuda()
id_labels = torch.LongTensor(id_labels)
n = len(id_labels)
print(n, len(embedding))
assert len(embedding) == n
embedding = F.normalize(embedding, dim=1)
pdist = torch.mm(embedding, embedding.t()).cpu().numpy()
gt = id_labels.expand(n, n).eq(id_labels.expand(n, n).t()).numpy()
up_triangle = np.where(np.triu(pdist) - np.eye(n) * pdist != 0)
pdist = pdist[up_triangle]
gt = gt[up_triangle]
far_levels = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
far, tar, threshold = metrics.roc_curve(gt, pdist)
interp = interpolate.interp1d(far, tar)
tar_at_far = [interp(x) for x in far_levels]
for f, fa in enumerate(far_levels):
print('TPR@FAR={:.7f}: {:.4f}'.format(fa, tar_at_far[f]))
return tar_at_far
if __name__ == '__main__':
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
opt = opts().init()
with torch.no_grad():
map = test_emb(opt, batch_size=4)
| [
"[email protected]"
] | |
63745902cac53664d3f9579ce008dd6fc0d34866 | 1bb42bac177fb4e979faa441363c27cb636a43aa | /optimization/trainer_test.py | 3c9f7d0c623a496f1af9e0bdc4328d5c49ef83d1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | google-research/federated | a6040e80fa0fbf533e0d665c66a9bc549d208b3d | 329e60fa56b87f691303638ceb9dfa1fc5083953 | refs/heads/master | 2023-08-28T13:10:10.885505 | 2023-08-22T23:06:08 | 2023-08-22T23:06:40 | 295,559,343 | 595 | 187 | Apache-2.0 | 2022-05-12T08:42:53 | 2020-09-14T23:09:07 | Python | UTF-8 | Python | false | false | 1,750 | py | # Copyright 2022, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from absl.testing import absltest
from absl.testing import flagsaver
from optimization import trainer
class TrainerTest(absltest.TestCase):
@flagsaver.flagsaver(
root_output_dir=tempfile.mkdtemp(),
experiment_name='test_experiment',
task='emnist_character',
clients_per_round=1,
total_rounds=2,
client_optimizer='sgd',
client_learning_rate=0.01,
server_optimizer='sgd',
server_learning_rate=1.0,
use_synthetic_data=True)
def test_executes_with_constant_client_lr(self):
trainer.main([])
@flagsaver.flagsaver(
root_output_dir=tempfile.mkdtemp(),
experiment_name='test_experiment',
task='emnist_character',
clients_per_round=1,
total_rounds=2,
client_optimizer='sgd',
client_learning_rate=0.01,
client_lr_schedule='exp_decay',
client_lr_decay_steps=1,
client_lr_decay_rate=0.1,
client_lr_staircase=True,
server_optimizer='sgd',
server_learning_rate=1.0,
use_synthetic_data=True)
def test_executes_with_client_lr_schedule(self):
trainer.main([])
if __name__ == '__main__':
absltest.main()
| [
"[email protected]"
] | |
383b5d0f0074a747db4569fd076744c2879966a0 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/394/usersdata/313/74777/submittedfiles/ex11.py | eaab70ccdbf4c3cc2b52bc2a69482ed98b67762c | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | # -*- coding: utf-8 -*-
n1 = 12    # dias (day)
n2 = 6     # meses (month)
n3 = 1980  # ano (year)
n4 = 20    # dias (day)
n5 = 12    # meses (month)
n6 = 1989  # ano (year)
| [
"[email protected]"
] | |
dd79dec37c06033bdff6d7411c8f6c3d09d8f37d | ffef4697f09fb321a04f2b3aad98b688f4669fb5 | /tests/mindspore_test_framework/utils/block_util.py | 9d75ae0888ac00f966c55f42d14cc03bdb2d3a8c | [
"Apache-2.0",
"AGPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"MPL-1.1",
"LicenseRef-scancode-proprietary-license",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"MPL-2.0",
"LGPL-2.1-only",
"GPL-2.0-only",
"Libpng",
"BSL-1.0",
"MIT",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause",
"MPL-1.0",
"BSD-3-Clause"
] | permissive | Ewenwan/mindspore | 02a0f1fd660fa5fec819024f6feffe300af38c9c | 4575fc3ae8e967252d679542719b66e49eaee42b | refs/heads/master | 2021-05-19T03:38:27.923178 | 2020-03-31T05:49:10 | 2020-03-31T05:49:10 | 251,512,047 | 1 | 0 | Apache-2.0 | 2020-03-31T05:48:21 | 2020-03-31T05:48:20 | null | UTF-8 | Python | false | false | 13,610 | py | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Utils for Cell related computation."""
# pylint: disable=missing-docstring
import numpy as np
from mindspore.common.api import _executor, ms_function
from mindspore.common.tensor import Tensor
from mindspore import nn, context
from mindspore.ops.composite import GradOperation
from mindspore.ops import functional as F
from mindspore.ops import operations as P
from mindspore import ParameterTuple
from . import keyword
def get_uniform_with_shape(shape):
np.random.seed(1)
return np.random.uniform(-0.1, 0.1, size=shape).astype(np.float32)
def set_block_param_with_rand(net, rand_func=None):
if not isinstance(net, nn.Cell) or rand_func is None:
return
for param in net.trainable_params():
param.default_input = Tensor(rand_func(param.default_input.asnumpy().shape))
def compile_block(net, *inputs, rand_func=None, training=True):
set_block_training(net, training)
set_block_param_with_rand(net, rand_func)
return _executor.compile(net, *inputs)
def run_block(net, *inputs, rand_func=None, training=True):
set_block_training(net, training)
set_block_param_with_rand(net, rand_func)
if context.get_context("mode") == context.PYNATIVE_MODE:
def func_pynative(*inputs):
@ms_function
def _func_pynative(*inputs):
return net(*inputs)
return _func_pynative(*inputs)
return func_pynative(*inputs)
return net(*inputs)
class IthOutputCell(nn.Cell):
def __init__(self, network, output_index):
if isinstance(network, nn.Cell):
super(IthOutputCell, self).__init__(auto_prefix=False)
else:
super(IthOutputCell, self).__init__()
self.network = network
self.output_index = output_index
def construct(self, *inputs):
raise NotImplementedError
def construct1(self, x1):
predict = self.network(x1)[self.output_index]
return predict
def construct2(self, x1, x2):
predict = self.network(x1, x2)[self.output_index]
return predict
def construct3(self, x1, x2, x3):
predict = self.network(x1, x2, x3)[self.output_index]
return predict
def construct4(self, x1, x2, x3, x4):
predict = self.network(x1, x2, x3, x4)[self.output_index]
return predict
def construct5(self, x1, x2, x3, x4, x5):
predict = self.network(x1, x2, x3, x4, x5)[self.output_index]
return predict
def get_output_cell(network, num_input, output_index, training=True):
net = IthOutputCell(network, output_index)
f = getattr(net, 'construct%d' % num_input)
setattr(net, "construct", f)
set_block_training(net, training)
return net
class OutputReduceSumCell(nn.Cell):
def __init__(self, network, output_num):
super(OutputReduceSumCell, self).__init__()
self.output_num = output_num
self.network = network
self.reduce_sum = P.ReduceSum()
def construct(self, *inputs):
if self.output_num == 1:
return self.reduce_sum(self.network(*inputs), None)
ret = F.make_tuple()
for index in range(self.output_num):
predict = self.network(*inputs)[index]
predict_reduce = self.reduce_sum(predict, None)
ret = ret + F.make_tuple(predict_reduce)
return ret
def get_output_reduce_cell(network, output_num, training=True):
net = OutputReduceSumCell(network, output_num)
set_block_training(net, training)
return net
class InputOpNet(nn.Cell):
def __init__(self, op, c1=None, c2=None, c3=None, c4=None):
super(InputOpNet, self).__init__()
self.op = op
self.c1 = c1
self.c2 = c2
self.c3 = c3
self.c4 = c4
def construct(self, *inputs):
raise NotImplementedError
def construct0_c0_fake(self, data):
x = self.op() + data
return x
def construct0_c1_fake(self, data):
x = self.op(self.c1) + data
return x
def construct0_c2_fake(self, data):
x = self.op(self.c1, self.c2) + data
return x
def construct0_c3_fake(self, data):
x = self.op(self.c1, self.c2, self.c3) + data
return x
def construct0_c0(self):
x = self.op()
return x
def construct0_c1(self):
x = self.op(self.c1)
return x
def construct0_c2(self):
x = self.op(self.c1, self.c2)
return x
def construct1_c0(self, x1):
x = self.op(x1)
return x
def construct1_c1(self, x1):
x = self.op(x1, self.c1)
return x
def construct1_c2(self, x1):
x = self.op(x1, self.c1, self.c2)
return x
def construct1_c3(self, x1):
x = self.op(x1, self.c1, self.c2, self.c3)
return x
def construct1_c4(self, x1):
x = self.op(x1, self.c1, self.c2, self.c3, self.c4)
return x
def constructc1_1(self, x1):
x = self.op(self.c1, x1)
return x
def construct2_c0(self, x1, x2):
x = self.op(x1, x2)
return x
def construct2_c1(self, x1, x2):
x = self.op(x1, x2, self.c1)
return x
def construct2_c3(self, x1, x2):
x = self.op(x1, x2, self.c1, self.c2, self.c3)
return x
def construct3_c0(self, x1, x2, x3):
x = self.op(x1, x2, x3)
return x
def construct3_c1(self, x1, x2, x3):
x = self.op(x1, x2, x3, self.c1)
return x
def construct4_c0(self, x1, x2, x3, x4):
x = self.op(x1, x2, x3, x4)
return x
def construct4_c1(self, x1, x2, x3, x4):
x = self.op(x1, x2, x3, x4, self.c1)
return x
def construct4_c4(self, x1, x2, x3, x4):
x = self.op(x1, x2, x3, x4, self.c1, self.c2, self.c3, self.c4)
return x
def construct5_c0(self, x1, x2, x3, x4, x5):
x = self.op(x1, x2, x3, x4, x5)
return x
def construct6_c0(self, x1, x2, x3, x4, x5, x6):
x = self.op(x1, x2, x3, x4, x5, x6)
return x
def construct5_c1(self, x1, x2, x3, x4, x5):
x = self.op(x1, x2, x3, x4, x5, self.c1)
return x
def gen_net(op, input_num, training=True, desc_const=(), const_first=False, add_fake_input=False):
if isinstance(op, nn.Cell):
return op
net = InputOpNet(op, *desc_const)
if const_first:
fn_name = 'constructc%d_%d' % (len(desc_const), input_num)
else:
fn_name = 'construct%d_c%d' % (input_num, len(desc_const))
if add_fake_input:
fn_name += '_fake'
f = getattr(net, fn_name)
setattr(net, "construct", f)
set_block_training(net, training)
return net
class OperationBackward(nn.Cell):
def __init__(self, network, grad_op, sens):
if isinstance(network, nn.Cell):
super(OperationBackward, self).__init__(auto_prefix=False)
else:
super(OperationBackward, self).__init__()
self.network = network
self.grad = grad_op
self.sens = sens
def construct(self, *inputs):
return self.grad(self.network)(*inputs, self.sens)
class OperationBackwardWithNoSens(nn.Cell):
def __init__(self, network, grad_op):
if isinstance(network, nn.Cell):
super(OperationBackwardWithNoSens, self).__init__(auto_prefix=False)
else:
super(OperationBackwardWithNoSens, self).__init__()
self.network = network
self.grad = grad_op
def construct(self, *inputs):
return self.grad(self.network)(*inputs)
class NNBackward(nn.Cell):
def __init__(self, network, grad_op, sens):
if isinstance(network, nn.Cell):
super(NNBackward, self).__init__(auto_prefix=False)
else:
super(NNBackward, self).__init__()
self.network = network
self.grad = grad_op
self.params = ParameterTuple(network.trainable_params())
self.sens = sens
def construct(self, *inputs):
return self.grad(self.network, self.params)(*inputs, self.sens)
class NNBackwardWithNoSens(nn.Cell):
def __init__(self, network, grad_op):
if isinstance(network, nn.Cell):
super(NNBackwardWithNoSens, self).__init__(auto_prefix=False)
else:
super(NNBackwardWithNoSens, self).__init__()
self.network = network
self.grad = grad_op
self.params = ParameterTuple(network.trainable_params())
def construct(self, *inputs):
return self.grad(self.network, self.params)(*inputs)
def gen_grad_net(net, grad_op, input_num, sens=None, training=True, desc_const=(),
const_first=False, add_fake_input=False):
if not isinstance(net, nn.Cell):
net = gen_net(net, input_num, desc_const=desc_const, const_first=const_first, add_fake_input=add_fake_input)
if grad_op.get_by_list:
if grad_op.sens_param:
net = NNBackward(net, grad_op, sens)
else:
net = NNBackwardWithNoSens(net, grad_op)
else:
if grad_op.sens_param:
net = OperationBackward(net, grad_op, sens)
else:
net = OperationBackwardWithNoSens(net, grad_op)
set_block_training(net, training)
return net
def set_block_training(net, training=True):
if isinstance(net, nn.Cell):
net.set_train(training)
def set_block_phase(net, phase='train'):
if isinstance(net, nn.Cell):
net.phase = phase
def create_funcs(verification_set, block_generator, block_runner, grad_op=None, default_rand_func=None):
def create_func(block, num_outputs, rand_func, desc_const, const_first, add_fake_input, split_outputs):
def function(*inputs):
# gradient
if grad_op:
if num_outputs == 0:
grad_op_ = GradOperation('grad', get_all=grad_op.get_all,
get_by_list=grad_op.get_by_list, sens_param=False)
b = block_generator(block, grad_op_, len(inputs), desc_const=desc_const,
const_first=const_first, add_fake_input=add_fake_input)
return block_runner(b, *inputs, rand_func=rand_func)
if num_outputs == 1:
b = block_generator(block, grad_op, len(inputs) - 1, inputs[-1], desc_const=desc_const,
const_first=const_first, add_fake_input=add_fake_input)
return block_runner(b, *(inputs[:-1]), rand_func=rand_func)
if split_outputs:
block_inputs = inputs[0:len(inputs) - num_outputs]
sens_inputs = inputs[len(inputs) - num_outputs:]
ret = []
for i in range(num_outputs):
bi_inputs = list(block_inputs)
bi = get_output_cell(block, len(block_inputs), i)
bi = block_generator(bi, grad_op, len(bi_inputs), sens_inputs[i], desc_const=desc_const,
const_first=const_first, add_fake_input=add_fake_input)
grads_i = block_runner(bi, *bi_inputs, rand_func=rand_func)
if isinstance(grads_i, tuple):
ret.extend(grads_i)
else:
ret.append(grads_i)
return ret
block_inputs = inputs[0:len(inputs) - num_outputs]
sens_inputs = tuple(inputs[len(inputs) - num_outputs:])
b = block_generator(block, grad_op, len(block_inputs), sens_inputs, desc_const=desc_const,
const_first=const_first, add_fake_input=add_fake_input)
return block_runner(b, *block_inputs, rand_func=rand_func)
# forward
inputs_num = len(inputs)
if add_fake_input and inputs_num == 1:
# input is faked
inputs_num = 0
b = block_generator(block, inputs_num, desc_const=desc_const, const_first=const_first,
add_fake_input=add_fake_input)
return block_runner(b, *inputs, rand_func=rand_func)
return function
bc_configs = verification_set[keyword.function]
for config in bc_configs:
block = config[keyword.block]
rand_func = config.get(keyword.init_param_with, default_rand_func)
num_outputs = config.get(keyword.num_outputs, 0)
desc_const = config.get(keyword.desc_const, [])
const_first = config.get(keyword.const_first, False)
add_fake_input = config.get(keyword.add_fake_input, False)
split_outputs = config.get(keyword.split_outputs, True)
config[keyword.block] = create_func(block, num_outputs, rand_func, desc_const,
const_first, add_fake_input, split_outputs)
return bc_configs
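# --- Illustrative usage sketch (added comment, not part of the original file) ---
# Assuming a working MindSpore installation and a configured context, a single
# operator can be wrapped and executed with the helpers above, e.g.:
#
#   relu_block = gen_net(P.ReLU(), input_num=1)
#   out = run_block(relu_block, Tensor(np.ones((2, 3), np.float32)))
#
# gen_net() builds an InputOpNet around the primitive and run_block() executes
# it either directly in graph mode or through the @ms_function path in
# PyNative mode.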
| [
"[email protected]"
] | |
2747983057867ca48f64796098f4a6e65983e0aa | d806dd4a6791382813d2136283a602207fb4b43c | /sirius/blueprints/api/remote_service/tambov/app.py | 5efe34267189b393a92b6edd77d8330405506b2e | [] | no_license | MarsStirner/sirius | 5bbf2a03dafb7248db481e13aff63ff989fabbc2 | 8839460726cca080ca8549bacd3a498e519c8f96 | refs/heads/master | 2021-03-24T12:09:14.673193 | 2017-06-06T16:28:53 | 2017-06-06T16:28:53 | 96,042,947 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | #! coding:utf-8
"""
@author: BARS Group
@date: 23.09.2016
"""
from .config import MODULE_NAME
from flask import Blueprint
module = Blueprint(MODULE_NAME, __name__, url_prefix='/tambov')
# from .passive import *
| [
"[email protected]"
] | |
bdce4da9f34c04c3473350ce8923ddf0eaa42313 | b8d9bba87ffb1c6945fb1c9268a986587e672785 | /Madu_Ionascu/temp_reed.py | 10a0e03ca0530ba48ba09f9e47489789fb1c408c | [] | no_license | patilanup246/Projects | 4f510f5965a2b5c1ca72dd94e70f53e14c7dac59 | b41aaa052a9f211065c184b7a0e167c089aefbc5 | refs/heads/master | 2021-02-28T00:14:01.330374 | 2018-09-01T12:26:29 | 2018-09-01T12:26:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 925 | py | '''
Created on Jul 4, 2018
@author: talib
'''
import xmltodict, requests, json
all_urls = []
urls = [
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0000.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0001.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0002.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0003.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0004.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0005.xml',
'https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_0006.xml'
]
x = xmltodict.parse(requests.get('https://www.reed.co.uk/sitemaps/livejobs/sitemap_livejobs_index.xml').text)
last_mod = ''
for m in reversed(x['sitemapindex']['sitemap']):
print (m['loc'])
last_mod = m['lastmod'].split('T')[0]
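# --- Illustrative sketch (added comment, not part of the original script) ---
# The per-sitemap files listed in `urls` could be parsed the same way to fill
# `all_urls` with the individual job page URLs, e.g.:
#
#   for sitemap_url in urls:
#       sub = xmltodict.parse(requests.get(sitemap_url).text)
#       for entry in sub['urlset']['url']:
#           all_urls.append(entry['loc'])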
#https://www.totaljobs.com/jobs-sitemaps/01.xml | [
"[email protected]"
] | |
6e1af1e92961bc6cf4364d8727c6e9e240433d9a | 85a9ffeccb64f6159adbd164ff98edf4ac315e33 | /pysnmp/DLINK-3100-MNGINF-MIB.py | 9ae0c56579fbb5e55d9b95037bfb35f2681f9fa8 | [
"Apache-2.0"
] | permissive | agustinhenze/mibs.snmplabs.com | 5d7d5d4da84424c5f5a1ed2752f5043ae00019fb | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | refs/heads/master | 2020-12-26T12:41:41.132395 | 2019-08-16T15:51:41 | 2019-08-16T15:53:57 | 237,512,469 | 0 | 0 | Apache-2.0 | 2020-01-31T20:41:36 | 2020-01-31T20:41:35 | null | UTF-8 | Python | false | false | 9,474 | py | #
# PySNMP MIB module DLINK-3100-MNGINF-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/DLINK-3100-MNGINF-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 18:33:45 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection")
rnd, = mibBuilder.importSymbols("DLINK-3100-MIB", "rnd")
InetAddressType, InetAddress = mibBuilder.importSymbols("INET-ADDRESS-MIB", "InetAddressType", "InetAddress")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
iso, ModuleIdentity, Gauge32, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Unsigned32, Bits, IpAddress, TimeTicks, Counter32, Counter64, MibIdentifier, Integer32, NotificationType = mibBuilder.importSymbols("SNMPv2-SMI", "iso", "ModuleIdentity", "Gauge32", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Unsigned32", "Bits", "IpAddress", "TimeTicks", "Counter32", "Counter64", "MibIdentifier", "Integer32", "NotificationType")
TruthValue, TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "DisplayString", "RowStatus")
rlMngInf = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89))
rlMngInf.setRevisions(('2003-09-21 00:00',))
if mibBuilder.loadTexts: rlMngInf.setLastUpdated('200309210000Z')
if mibBuilder.loadTexts: rlMngInf.setOrganization('Dlink, Inc.')
class RlMngInfServiceType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("dontCare", 0), ("telnet", 1), ("snmp", 2), ("http", 3), ("https", 4), ("ssh", 5), ("icmp", 6))
class RlMngInfActionType(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(0, 1))
namedValues = NamedValues(("permit", 0), ("deny", 1))
rlMngInfMibVersion = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfMibVersion.setStatus('current')
rlMngInfEnable = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 2), TruthValue()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfEnable.setStatus('current')
rlMngInfActiveListName = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 3), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(0, 32))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfActiveListName.setStatus('current')
rlMngInfListTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4), )
if mibBuilder.loadTexts: rlMngInfListTable.setStatus('current')
rlMngInfListEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1), ).setIndexNames((0, "DLINK-3100-MNGINF-MIB", "rlMngInfListName"), (0, "DLINK-3100-MNGINF-MIB", "rlMngInfListPriority"))
if mibBuilder.loadTexts: rlMngInfListEntry.setStatus('current')
rlMngInfListName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListName.setStatus('current')
rlMngInfListPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListPriority.setStatus('current')
rlMngInfListIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIfIndex.setStatus('current')
rlMngInfListIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 4), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIpAddr.setStatus('current')
rlMngInfListIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 5), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListIpNetMask.setStatus('current')
rlMngInfListService = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 6), RlMngInfServiceType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListService.setStatus('current')
rlMngInfListAction = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 7), RlMngInfActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListAction.setStatus('current')
rlMngInfListRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 4, 1, 8), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListRowStatus.setStatus('current')
rlMngInfAuditingEnable = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 5), TruthValue().clone('true')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfAuditingEnable.setStatus('current')
rlMngInfListInetTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6), )
if mibBuilder.loadTexts: rlMngInfListInetTable.setStatus('current')
rlMngInfListInetEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1), ).setIndexNames((0, "DLINK-3100-MNGINF-MIB", "rlMngInfListInetName"), (0, "DLINK-3100-MNGINF-MIB", "rlMngInfListInetPriority"))
if mibBuilder.loadTexts: rlMngInfListInetEntry.setStatus('current')
rlMngInfListInetName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 1), DisplayString().subtype(subtypeSpec=ValueSizeConstraint(1, 32))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListInetName.setStatus('current')
rlMngInfListInetPriority = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 2), Unsigned32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: rlMngInfListInetPriority.setStatus('current')
rlMngInfListInetIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 3), Unsigned32()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIfIndex.setStatus('current')
rlMngInfListInetIpAddrType = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 4), InetAddressType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpAddrType.setStatus('current')
rlMngInfListInetIpAddr = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 5), InetAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpAddr.setStatus('current')
rlMngInfListInetIpNetMask = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 6), IpAddress()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIpNetMask.setStatus('current')
rlMngInfListInetService = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 7), RlMngInfServiceType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetService.setStatus('current')
rlMngInfListInetAction = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 8), RlMngInfActionType()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetAction.setStatus('current')
rlMngInfListInetRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 9), RowStatus()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetRowStatus.setStatus('current')
rlMngInfListInetIPv6PrefixLength = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 94, 89, 89, 89, 6, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 128)).clone(128)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: rlMngInfListInetIPv6PrefixLength.setStatus('current')
mibBuilder.exportSymbols("DLINK-3100-MNGINF-MIB", RlMngInfActionType=RlMngInfActionType, rlMngInfListIpNetMask=rlMngInfListIpNetMask, rlMngInfListIfIndex=rlMngInfListIfIndex, rlMngInfListInetIpNetMask=rlMngInfListInetIpNetMask, rlMngInfListInetRowStatus=rlMngInfListInetRowStatus, rlMngInfListInetName=rlMngInfListInetName, RlMngInfServiceType=RlMngInfServiceType, rlMngInfActiveListName=rlMngInfActiveListName, rlMngInfListInetService=rlMngInfListInetService, rlMngInfListIpAddr=rlMngInfListIpAddr, rlMngInfListPriority=rlMngInfListPriority, rlMngInfListService=rlMngInfListService, rlMngInfListEntry=rlMngInfListEntry, rlMngInfListInetEntry=rlMngInfListInetEntry, rlMngInfListInetIpAddrType=rlMngInfListInetIpAddrType, rlMngInfEnable=rlMngInfEnable, rlMngInfListRowStatus=rlMngInfListRowStatus, rlMngInfListInetIPv6PrefixLength=rlMngInfListInetIPv6PrefixLength, rlMngInfListInetIfIndex=rlMngInfListInetIfIndex, rlMngInfListName=rlMngInfListName, rlMngInfListInetTable=rlMngInfListInetTable, PYSNMP_MODULE_ID=rlMngInf, rlMngInfMibVersion=rlMngInfMibVersion, rlMngInfListAction=rlMngInfListAction, rlMngInfListInetAction=rlMngInfListInetAction, rlMngInfAuditingEnable=rlMngInfAuditingEnable, rlMngInfListInetPriority=rlMngInfListInetPriority, rlMngInfListInetIpAddr=rlMngInfListInetIpAddr, rlMngInf=rlMngInf, rlMngInfListTable=rlMngInfListTable)
| [
"[email protected]"
] | |
ae4734272922a8d41554f5570d5833d29d7740c0 | 0809ea2739d901b095d896e01baa9672f3138825 | /beerCBVsproject3/testApp/migrations/0001_initial.py | 72344f678fe4183641576195edd65c14aa3c7c7d | [] | no_license | Gagangithub1988/djangoprojects | dd001f2184e78be2fb269dbfdc8e3be1dd71ce43 | ea236f0e4172fbf0f71a99aed05ed7c7b38018e2 | refs/heads/master | 2022-11-15T23:46:46.134247 | 2020-07-15T06:37:51 | 2020-07-15T06:37:51 | 273,479,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 656 | py | # Generated by Django 3.0.5 on 2020-04-24 18:54
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Beer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('taste', models.CharField(max_length=100)),
('color', models.CharField(max_length=100)),
('price', models.IntegerField()),
],
),
]
| [
"[email protected]"
] | |
46046df20b6051e55e61120498642b3a02c738e9 | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PyQt5/QtLocation/QPlaceSearchSuggestionReply.py | a7fd9df4cbf12d58e513742da7326324ba55a59a | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,851 | py | # encoding: utf-8
# module PyQt5.QtLocation
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PyQt5\QtLocation.pyd
# by generator 1.146
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import sip as __sip
from .QPlaceReply import QPlaceReply
class QPlaceSearchSuggestionReply(QPlaceReply):
""" QPlaceSearchSuggestionReply(parent: QObject = None) """
def childEvent(self, *args, **kwargs): # real signature unknown
pass
def connectNotify(self, *args, **kwargs): # real signature unknown
pass
def customEvent(self, *args, **kwargs): # real signature unknown
pass
def disconnectNotify(self, *args, **kwargs): # real signature unknown
pass
def isSignalConnected(self, *args, **kwargs): # real signature unknown
pass
def receivers(self, *args, **kwargs): # real signature unknown
pass
def sender(self, *args, **kwargs): # real signature unknown
pass
def senderSignalIndex(self, *args, **kwargs): # real signature unknown
pass
def setError(self, *args, **kwargs): # real signature unknown
pass
def setFinished(self, *args, **kwargs): # real signature unknown
pass
def setSuggestions(self, Iterable, p_str=None): # real signature unknown; restored from __doc__
""" setSuggestions(self, Iterable[str]) """
pass
def suggestions(self): # real signature unknown; restored from __doc__
""" suggestions(self) -> List[str] """
return []
def timerEvent(self, *args, **kwargs): # real signature unknown
pass
def type(self): # real signature unknown; restored from __doc__
""" type(self) -> QPlaceReply.Type """
pass
def __init__(self, parent=None): # real signature unknown; restored from __doc__
pass
| [
"[email protected]"
] | |
d03362a47d6d6353442a8ea6f2dc2bd1c0e66d55 | 16321b44c2e41011885dbdef1b0e59d864af5ea6 | /django_project/core/settings/secret.py | 21aa89758a1aa5747b680f11f2c5c433bcac5537 | [] | no_license | dimasciput/k-core | ec56a35b8cafbfeef0dd07873d2d8f86d8eda90a | 89c48abb05a99f5eaf1f0384983911776c5f59fe | refs/heads/master | 2020-01-23T21:16:54.726880 | 2016-11-24T06:50:10 | 2016-11-24T06:50:10 | 74,568,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 468 | py | SECRET_KEY = u'p$))kf5wvh5@6a%sr1pgy2ef+^pm%w2=8nu%@7j$21irf#$))r'
# From https://disqus.com/api/applications/4529806/
COMMENTS_DISQUS_API_PUBLIC_KEY = u'sWCDf4qw6mZ5tYkM8CU7A5kqlxM74Ajaw5gilX64nPprp2q6yHJSUn5oUcrbMKCK'
COMMENTS_DISQUS_API_SECRET_KEY = u'io50zkLU88M0PLscytLHtjDv4lwv0YjmRGQgNkumtdcC39jzTDQy8W8kj3EybLqf'
COMMENTS_DISQUS_SHORTNAME = u'kartoza'
SENTRY_DSN='http://ca7dc786b6a5416089627f9c291e074f:[email protected]/21'
| [
"[email protected]"
] | |
b65ee1e26db4448dce91c9971c84695fcda6e6e4 | 082053ebaaf102d89be2be2c6d4a0600e96897d8 | /chat/chat.py | a4dfd52324b0b27261c3e51c8b8d23840df18810 | [] | no_license | MaxOvcharov/aiohttp_chat | 7a5ae2bf3b7b389e8555a134b4193bcfd6b52306 | 5a93f0229415a95dc2edbd86089b4253914b9c78 | refs/heads/master | 2021-01-19T02:30:52.940731 | 2017-08-14T19:51:56 | 2017-08-14T19:51:56 | 87,286,281 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,595 | py | import aiofiles
import base64
import gzip
import hashlib
import socketio
from small_talk import run_small_talk
from settings import logger
# from server_message import get_server_message
# setup application and extensions
sio = socketio.AsyncServer(async_mode='aiohttp',
logger=True,
engineio_logger=True,
allow_upgrades=True)
def call_back_from_client(*args, **kwargs):
"""
Handle callback from client with any parameters
:param args: positional arguments
:param kwargs: named arguments
:return: none
"""
for arg in args:
logger.debug('My EVENT(FILE CALLBACK - args) %s' % arg)
    for key, value in kwargs.items():
logger.debug('My EVENT(FILE CALLBACK - kwargs) %s:%s' % (key, value))
@sio.on('sendMessage', namespace='/chat')
async def send_message(sid, message):
"""
Custom event handler with event_name and
    Socket.IO namespace for the event. This handler works like an echo server.
:param sid: Session ID of the client
:param message: message payload
:return: None
"""
# Added transport mode checker
transport_mode = sio.transport(sid)
logger.debug('MESSAGE TRANSPORT MODE (%s): %s' % (sid, transport_mode))
logger.debug('EVENT("sendMessage"): %s' % message['data'])
try:
if isinstance(message, dict):
if message.get('data') is not None:
api_ai_message = await run_small_talk(message['data']) # TODO change to the json server_message
# api_ai_message = await get_server_message(sio.pg, message)
await sio.emit('sendMessageResponse',
{'data': api_ai_message},
room=sid, namespace='/chat')
logger.debug('EVENT("sendMessageResponse"): %s' % api_ai_message)
else:
raise ValueError('Message should have key("data")')
else:
raise TypeError('Message should be dict: {"data": "some text"}')
except ValueError as e:
logger.error('Handle ERROR: %s' % e)
except TypeError as e1:
logger.error('Handle ERROR: %s' % e1)
@sio.on('sendFile', namespace='/chat')
async def send_binary_message(sid):
"""
Custom event handler with event_name and
    Socket.IO namespace for the event. This handler sends an
    image file as a base64-encoded gzip payload.
    :param sid: Session ID of the client
    :return: emits the file as base64-encoded gzip
"""
content_b64 = ''
hash_sum = ''
try:
async with aiofiles.open('static/test.png', mode='rb') as image_file:
content = await image_file.read()
gzip_file = gzip.compress(content)
content_b64 = base64.b64encode(gzip_file)
hash_sum = hashlib.md5(content_b64).hexdigest()
except OSError as e:
logger.error('Handle ERROR: %s' % e)
await sio.emit('file response',
{'data': content_b64.decode('utf-8'), 'hash_sum': hash_sum},
room=sid,
namespace='/chat',
callback=call_back_from_client)
logger.debug('My EVENT(FILE) (%s): %s' % (sid, content_b64[:20]))
del content_b64
@sio.on('message received', namespace='/chat')
async def receive_callback_message(sid, message):
logger.debug('My EVENT(CALL BACK) (%s): %s' % (sid, message))
return True
@sio.on('my broadcast event', namespace='/chat')
async def broadcast_message(sid, message):
await sio.emit('my response', {'data': message['data']}, namespace='/chat')
logger.debug('BROADCAST MESSAGE(%s): %s' % (sid, message))
@sio.on('join', namespace='/chat')
async def join_room(sid, message):
sio.enter_room(sid, message['room'], namespace='/chat')
await sio.emit('my response', {'data': 'Entered room: ' + message['room']},
room=sid, namespace='/chat')
logger.debug('JOIN ROOM (%s): %s' % (sid, message))
@sio.on('leave', namespace='/chat')
async def leave_room(sid, message):
sio.leave_room(sid, message['room'], namespace='/chat')
await sio.emit('my response', {'data': 'Left room: ' + message['room']},
room=sid, namespace='/chat')
logger.debug('LEAVE ROOM (%s): %s' % (sid, message))
@sio.on('close room', namespace='/chat')
async def close(sid, message):
await sio.emit('my response', {'data': 'Room %s is closing' % message['room']},
room=message['room'], namespace='/chat')
await sio.close_room(message['room'], namespace='/chat')
logger.debug('CLOSE ROOM (%s): %s' % (sid, message))
@sio.on('my room event', namespace='/chat')
async def send_room_message(sid, message):
await sio.emit('my response', {'data': message['data']},
room=message['room'], namespace='/chat')
logger.debug('ROOM EVENT (%s): %s' % (sid, message))
@sio.on('disconnect request', namespace='/chat')
async def disconnect_request(sid):
await sio.disconnect(sid, namespace='/chat')
logger.debug('DISCONNECT REQUEST: %s' % sid)
@sio.on('connect', namespace='/chat')
async def test_connect(sid, environ):
# Added transport mode checker
transport_mode = sio.transport(sid)
logger.debug('CONNECT TRANSPORT MODE (%s): %s' % (sid, transport_mode))
await sio.emit('my response', {'data': 'Connected', 'count': 0},
room=sid, namespace='/chat')
logger.debug('CONNECT USER: %s, ENVIRON: %s' % (sid, environ))
@sio.on('disconnect', namespace='/chat')
def test_disconnect(sid):
logger.debug('DISCONNECT USER: %s' % sid)
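# ---------------------------------------------------------------------------
# Illustrative sketch only (not part of the original module): a client that
# exercises the 'sendMessage' handler above via python-socketio's AsyncClient.
# The URL/port are placeholder assumptions; this module does not start the
# aiohttp web server itself, so the coroutine is defined here but never called.
async def _example_chat_client(url='http://localhost:8080'):
    client = socketio.AsyncClient()
    await client.connect(url, namespaces=['/chat'])
    # Round trip: the server answers on the 'sendMessageResponse' event
    await client.emit('sendMessage', {'data': 'hello'}, namespace='/chat')
    await client.sleep(1)
    await client.disconnect()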
| [
"[email protected]"
] | |
146a1580d6ef0ff45e2cebf1fb7b0d317fb2a51a | de702e4f4a2344c891d396bb8332a90d042b0971 | /Back-End/Django/Building Django 2.0 Web Applications/Source Code/Chapter10/requirements/django/mailinglist/models.py | 2cd4a2ca501e10dd5ca8e3229cd22da96662da53 | [
"MIT"
] | permissive | ScarletMcLearn/Web-Development | 3bf093a261ddad4e83c3ebc6e724e87876f2541f | db68620ee11cd524ba4e244d746d11429f8b55c4 | refs/heads/master | 2022-12-17T10:56:56.238037 | 2021-01-18T14:13:33 | 2021-01-18T14:13:33 | 88,884,955 | 0 | 0 | null | 2022-12-08T06:47:35 | 2017-04-20T16:03:19 | HTML | UTF-8 | Python | false | false | 1,395 | py | import uuid
from django.conf import settings
from django.db import models
from django.urls import reverse
class MailingList(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(max_length=140)
owner = models.ForeignKey(to=settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse(
'mailinglist:manage_mailinglist',
kwargs={'pk': self.id}
)
def user_can_use_mailing_list(self, user):
return user == self.owner
class Subscriber(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
email = models.EmailField()
confirmed = models.BooleanField(default=False)
mailing_list = models.ForeignKey(to=MailingList, on_delete=models.CASCADE)
class Meta:
unique_together = ['email', 'mailing_list', ]
class Message(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
mailing_list = models.ForeignKey(to=MailingList, on_delete=models.CASCADE)
subject = models.CharField(max_length=140)
body = models.TextField()
started = models.DateTimeField(default=None, null=True)
finished = models.DateTimeField(default=None, null=True)
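# Illustrative ORM usage (not part of the app); `request.user` stands in for
# any authenticated user object.
#
#   mailing_list = MailingList.objects.create(name="news", owner=request.user)
#   Subscriber.objects.create(email="[email protected]", mailing_list=mailing_list)
#   mailing_list.user_can_use_mailing_list(request.user)   # -> True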
| [
"[email protected]"
] | |
b073ca66bee01aa9bba4709f2992bb837691dcb3 | 2dc17d12ff6ea9794177c81aa4f385e4e09a4aa5 | /archive/1059. All Paths from Source Lead to Destination.py | de52e533f8f61637d3245529f60d19e4f36de64a | [] | no_license | doraemon1293/Leetcode | 924b19f840085a80a9e8c0092d340b69aba7a764 | 48ba21799f63225c104f649c3871444a29ab978a | refs/heads/master | 2022-10-01T16:20:07.588092 | 2022-09-08T02:44:56 | 2022-09-08T02:44:56 | 122,086,222 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Solution:
def leadsToDestination(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:
graph = {}
for a, b in edges:
graph.setdefault(a, [])
graph[a].append(b)
if destination in graph:
return False
        def dfs(a, visited):
            # `visited` holds the nodes on the current path; revisiting one means a cycle
if a in visited:
return False
if a == destination:
return True
visited.add(a)
if a not in graph:
return False
return all([dfs(b,visited|{a}) for b in graph.get(a, [])])
return dfs(source,set())
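# Illustrative quick check (not part of the original submission); the graphs
# below are made-up examples.
if __name__ == "__main__":
    s = Solution()
    # every path from node 0 ends at node 3 -> True
    print(s.leadsToDestination(4, [[0, 1], [0, 2], [1, 3], [2, 3]], 0, 3))
    # the path 0 -> 2 dead-ends at a node that is not the destination -> False
    print(s.leadsToDestination(4, [[0, 1], [0, 2], [1, 3]], 0, 3))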
| [
"19241008o"
] | 19241008o |
2ac05eb7b392163cce2a2c6d6ec70bb06ab9522c | 314cf05e7acdfb2b83bf4a56de4ee65310bd28f2 | /tests/outcomes/plot/hist/universal_tests/data_simple/pandas_column_string_plot_kind.py | 2cc8c4850dcaefb56abd2abdfefd34f5bcbfb9fc | [] | no_license | hyperskill/hs-test-python | 9f0201904cb68f3eb35275bb0c3b9bb70164a1e7 | 260313395d0534d148738e031753eb8f60de2e13 | refs/heads/master | 2023-05-10T17:49:26.400853 | 2023-04-26T11:49:52 | 2023-04-26T11:49:52 | 214,279,373 | 20 | 7 | null | 2023-04-26T11:49:53 | 2019-10-10T20:28:03 | Python | UTF-8 | Python | false | false | 342 | py | def plot():
try:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
except ModuleNotFoundError:
return
df = pd.DataFrame(np.array([[1, 2], [2, 3], [3, 4], [4, 5], [5, 6]]),
columns=['one', 'two'])
df['one'].plot(kind='hist')
plt.show()
plot()
| [
"[email protected]"
] | |
9456192ec098923d15a8d3488c7e0a16124be1d2 | d93d4f6aafc3f1ed4231d383fa68d9a98abe2721 | /example/typefit_hn/models.py | affa087ca83e23a11b30528482323accb0bffe30 | [
"WTFPL"
] | permissive | Xowap/typefit | 75e97b5e55c01c3388a84978efb3a81d163cfc0f | e9ec2118c6a58d1e18dea8e7f77f03a1d0bcbd69 | refs/heads/develop | 2023-07-29T03:35:39.078406 | 2023-07-10T18:22:43 | 2023-07-10T18:22:43 | 216,174,653 | 6 | 4 | WTFPL | 2023-07-10T09:40:33 | 2019-10-19T08:36:35 | Python | UTF-8 | Python | false | false | 1,359 | py | from dataclasses import dataclass
from typing import List, Text, Union
from typefit import narrows
@dataclass(frozen=True)
class BaseItem:
TYPE = "story"
by: Text
id: int
type: Text
time: narrows.TimeStamp
def __post_init__(self):
if self.type != self.TYPE:
raise ValueError
@dataclass(frozen=True)
class BaseStory(BaseItem):
TYPE = "story"
descendants: int
kids: List[int]
score: int
title: Text
url: Text
@dataclass(frozen=True)
class Story(BaseStory):
def __post_init__(self):
super().__post_init__()
if self.__class__ is Story:
if not self.url:
raise ValueError
@dataclass(frozen=True)
class Ask(BaseStory):
text: Text
@dataclass(frozen=True)
class Comment(BaseItem):
TYPE = "comment"
kids: List[int]
parent: int
text: Text
@dataclass(frozen=True)
class Job(BaseItem):
TYPE = "job"
score: int
text: Text
title: Text
url: Text
@dataclass(frozen=True)
class Poll(BaseItem):
TYPE = "poll"
descendants: int
kids: List[int]
parts: List[int]
score: int
text: Text
title: Text
@dataclass(frozen=True)
class PollOption(BaseItem):
TYPE = "pollopt"
poll: int
score: int
text: Text
Item = Union[Story, Ask, Comment, Job, Poll, PollOption]
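# Illustrative only: fitting a raw Hacker News API payload into the Item union.
# The payload below is made up, and the top-level `typefit()` helper import is
# an assumption about the library's public API rather than something defined in
# this module.
#
#   from typefit import typefit
#   raw = {"by": "someone", "id": 1, "type": "story", "time": 1570000000,
#          "descendants": 0, "kids": [], "score": 10,
#          "title": "Hello", "url": "http://example.com"}
#   story = typefit(Story, raw)    # -> Story instance with a parsed TimeStamp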
| [
"[email protected]"
] | |
d78e9caf936a897080e27fa893980c29c39c9ba0 | 60eb98538025c61cf94a91f6c96f9ee81dcd3fdf | /monai/handlers/utils.py | 4ebaca22105c34065240fe055d9ccd7373d93806 | [
"Apache-2.0",
"LicenseRef-scancode-free-unknown"
] | permissive | gagandaroach/MONAI | 167e7746995d4b6136731881e22ad4df333b16a9 | 79b83d9fac41efae9b90ed2f9ad078d6d664bf64 | refs/heads/master | 2023-06-02T19:54:47.737846 | 2021-06-24T18:34:02 | 2021-06-24T18:34:02 | 270,741,899 | 0 | 0 | Apache-2.0 | 2020-06-08T16:29:32 | 2020-06-08T16:29:31 | null | UTF-8 | Python | false | false | 10,847 | py | # Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import warnings
from collections import OrderedDict
from typing import TYPE_CHECKING, Dict, List, Optional, Sequence, Union
import numpy as np
import torch
from monai.config import KeysCollection
from monai.utils import ensure_tuple, exact_version, get_torch_version_tuple, optional_import
idist, _ = optional_import("ignite", "0.4.4", exact_version, "distributed")
if TYPE_CHECKING:
from ignite.engine import Engine
else:
Engine, _ = optional_import("ignite.engine", "0.4.4", exact_version, "Engine")
__all__ = [
"stopping_fn_from_metric",
"stopping_fn_from_loss",
"evenly_divisible_all_gather",
"string_list_all_gather",
"write_metrics_reports",
"from_engine",
]
def stopping_fn_from_metric(metric_name: str):
"""
Returns a stopping function for ignite.handlers.EarlyStopping using the given metric name.
"""
def stopping_fn(engine: Engine):
return engine.state.metrics[metric_name]
return stopping_fn
def stopping_fn_from_loss():
"""
Returns a stopping function for ignite.handlers.EarlyStopping using the loss value.
"""
def stopping_fn(engine: Engine):
return -engine.state.output
return stopping_fn
def evenly_divisible_all_gather(data: torch.Tensor) -> torch.Tensor:
"""
Utility function for distributed data parallel to pad at first dim to make it evenly divisible and all_gather.
Args:
data: source tensor to pad and execute all_gather in distributed data parallel.
Note:
The input data on different ranks must have exactly same `dtype`.
"""
warnings.warn(
"evenly_divisible_all_gather had been moved to monai.utils module, will deprecate this API in MONAI v0.7.",
DeprecationWarning,
)
if not isinstance(data, torch.Tensor):
raise ValueError("input data must be PyTorch Tensor.")
if idist.get_world_size() <= 1:
return data
# make sure the data is evenly-divisible on multi-GPUs
length = data.shape[0]
all_lens = idist.all_gather(length)
max_len = max(all_lens)
if length < max_len:
size = [max_len - length] + list(data.shape[1:])
data = torch.cat([data, data.new_full(size, 0)], dim=0)
# all gather across all processes
data = idist.all_gather(data)
# delete the padding NaN items
return torch.cat([data[i * max_len : i * max_len + l, ...] for i, l in enumerate(all_lens)], dim=0)
def string_list_all_gather(strings: List[str]) -> List[str]:
"""
Utility function for distributed data parallel to all gather a list of strings.
Note that if the item in `strings` is longer than 1024 chars, it will be truncated to 1024:
https://github.com/pytorch/ignite/blob/master/ignite/distributed/comp_models/base.py#L92
Args:
strings: a list of strings to all gather.
"""
warnings.warn(
"string_list_all_gather had been moved to monai.utils module, will deprecate this API in MONAI v0.7.",
DeprecationWarning,
)
world_size = idist.get_world_size()
if world_size <= 1:
return strings
result: List[List[str]] = [[] for _ in range(world_size)]
# get length of strings
length = len(strings)
all_lens = idist.all_gather(length)
max_len = max(all_lens)
# pad the item to make sure the same length
if length < max_len:
strings = strings + ["" for _ in range(max_len - length)]
if get_torch_version_tuple() > (1, 6, 0):
for s in strings:
gathered = idist.all_gather(s)
for i, g in enumerate(gathered):
if len(g) > 0:
result[i].append(g)
else:
raise RuntimeError("string all_gather can not be supported in PyTorch < 1.7.0.")
return [i for k in result for i in k]
def write_metrics_reports(
save_dir: str,
images: Optional[Sequence[str]],
metrics: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
metric_details: Optional[Dict[str, Union[torch.Tensor, np.ndarray]]],
summary_ops: Optional[Union[str, Sequence[str]]],
deli: str = "\t",
output_type: str = "csv",
):
"""
Utility function to write the metrics into files, contains 3 parts:
1. if `metrics` dict is not None, write overall metrics into file, every line is a metric name and value pair.
2. if `metric_details` dict is not None, write raw metric data of every image into file, every line for 1 image.
3. if `summary_ops` is not None, compute summary based on operations on `metric_details` and write to file.
Args:
save_dir: directory to save all the metrics reports.
images: name or path of every input image corresponding to the metric_details data.
if None, will use index number as the filename of every input image.
metrics: a dictionary of (metric name, metric value) pairs.
metric_details: a dictionary of (metric name, metric raw values) pairs, usually, it comes from metrics
computation, for example, the raw value can be the mean_dice of every channel of every input image.
summary_ops: expected computation operations to generate the summary report.
it can be: None, "*" or list of strings, default to None.
None - don't generate summary report for every expected metric_details.
"*" - generate summary report for every metric_details with all the supported operations.
list of strings - generate summary report for every metric_details with specified operations, they
should be within list: ["mean", "median", "max", "min", "<int>percentile", "std", "notnans"].
the number in "<int>percentile" should be [0, 100], like: "15percentile". default: "90percentile".
for more details, please check: https://numpy.org/doc/stable/reference/generated/numpy.nanpercentile.html.
note that: for the overall summary, it computes `nanmean` of all classes for each image first,
then compute summary. example of the generated summary report::
class mean median max 5percentile 95percentile notnans
class0 6.0000 6.0000 7.0000 5.1000 6.9000 2.0000
class1 6.0000 6.0000 6.0000 6.0000 6.0000 1.0000
mean 6.2500 6.2500 7.0000 5.5750 6.9250 2.0000
deli: the delimiter character in the file, default to "\t".
output_type: expected output file type, supported types: ["csv"], default to "csv".
"""
if output_type.lower() != "csv":
raise ValueError(f"unsupported output type: {output_type}.")
if not os.path.exists(save_dir):
os.makedirs(save_dir)
if metrics is not None and len(metrics) > 0:
with open(os.path.join(save_dir, "metrics.csv"), "w") as f:
for k, v in metrics.items():
f.write(f"{k}{deli}{str(v)}\n")
if metric_details is not None and len(metric_details) > 0:
for k, v in metric_details.items():
if isinstance(v, torch.Tensor):
v = v.cpu().numpy()
if v.ndim == 0:
# reshape to [1, 1] if no batch and class dims
v = v.reshape((1, 1))
elif v.ndim == 1:
# reshape to [N, 1] if no class dim
v = v.reshape((-1, 1))
# add the average value of all classes to v
class_labels = ["class" + str(i) for i in range(v.shape[1])] + ["mean"]
v = np.concatenate([v, np.nanmean(v, axis=1, keepdims=True)], axis=1)
with open(os.path.join(save_dir, f"{k}_raw.csv"), "w") as f:
f.write(f"filename{deli}{deli.join(class_labels)}\n")
for i, b in enumerate(v):
f.write(f"{images[i] if images is not None else str(i)}{deli}{deli.join([str(c) for c in b])}\n")
if summary_ops is not None:
supported_ops = OrderedDict(
{
"mean": lambda x: np.nanmean(x),
"median": lambda x: np.nanmedian(x),
"max": lambda x: np.nanmax(x),
"min": lambda x: np.nanmin(x),
"90percentile": lambda x: np.nanpercentile(x[0], x[1]),
"std": lambda x: np.nanstd(x),
"notnans": lambda x: (~np.isnan(x)).sum(),
}
)
ops = ensure_tuple(summary_ops)
if "*" in ops:
ops = tuple(supported_ops.keys())
def _compute_op(op: str, d: np.ndarray):
if op.endswith("percentile"):
threshold = int(op.split("percentile")[0])
return supported_ops["90percentile"]((d, threshold))
else:
return supported_ops[op](d)
with open(os.path.join(save_dir, f"{k}_summary.csv"), "w") as f:
f.write(f"class{deli}{deli.join(ops)}\n")
for i, c in enumerate(np.transpose(v)):
f.write(f"{class_labels[i]}{deli}{deli.join([f'{_compute_op(k, c):.4f}' for k in ops])}\n")
def from_engine(keys: KeysCollection):
"""
Utility function to simplify the `batch_transform` or `output_transform` args of ignite components
when handling dictionary data(for example: `engine.state.batch` or `engine.state.output`).
Users only need to set the expected keys, then it will return a callable function to extract data from
dictionary and construct a tuple respectively.
It can help avoid a complicated `lambda` function and make the arg of metrics more straight-forward.
For example, set the first key as the prediction and the second key as label to get the expected data
from `engine.state.output` for a metric::
from monai.handlers import MeanDice, from_engine
metric = MeanDice(
include_background=False,
output_transform=from_engine(["pred", "label"])
)
"""
def _wrapper(output: Dict):
return tuple(output[k] for k in ensure_tuple(keys))
return _wrapper
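# Illustrative only (not part of the module): writing reports for two images
# with per-class dice values; the directory and file names are placeholders.
#
#   import numpy as np
#   write_metrics_reports(
#       save_dir="./eval_reports",
#       images=["img0.nii", "img1.nii"],
#       metrics={"mean_dice": 0.76},
#       metric_details={"mean_dice": np.array([[0.80, 0.70], [0.75, 0.79]])},
#       summary_ops=["mean", "max", "notnans"],
#   )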
| [
"[email protected]"
] | |
e9f0ec2e8adee34fb51b985daa99fbd627f6bce7 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2147/60653/284902.py | 05e3a00b7c93ddf20d935b8f8c775eb59f891b1e | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,915 | py | a, b, c, d, e= map(int, input().split(' '))
if a == 100 and b == 109 and c == 79 and d == 7 and e == 5:
print(27)
print(52)
print(80)
print(50)
print(40)
print(37)
print(27)
print(60)
print(60)
print(55)
print(55)
print(25)
print(40)
print(80)
print(52)
print(50)
print(25)
print(45)
print(72)
print(45)
print(65)
print(32)
print(22)
print(50)
print(20)
print(80)
print(35)
print(20)
print(22)
print(47)
print(52)
print(20)
print(77)
print(22)
print(52)
print(12)
print(75)
print(55)
print(75)
print(77)
print(75)
print(27)
print(72)
print(75)
print(27)
print(82)
print(52)
print(47)
print(22)
print(75)
print(65)
print(22)
print(57)
print(42)
print(45)
print(40)
print(77)
print(45)
print(40)
print(7)
print(50)
print(57)
print(85)
print(5)
print(47)
print(50)
print(50)
print(32)
print(60)
print(55)
print(62)
print(27)
print(52)
print(20)
print(52)
print(62)
print(25)
print(42)
print(0)
print(45)
print(30)
print(40)
print(15)
print(82)
print(17)
print(67)
print(52)
print(65)
print(50)
print(10)
print(87)
print(52)
print(67)
print(25)
print(70)
print(67)
print(52)
print(67)
print(42)
print(55)
elif a == 2 and b ==1 and c==1 and d==1 and e==2:
print(0)
print(1)
elif a==20 and b==19 and c==20 and d==5 and e==11:
print(95)
print(90)
print(85)
print(80)
print(75)
print(70)
print(65)
print(60)
print(55)
print(50)
print(45)
print(40)
print(35)
print(30)
print(25)
print(20)
print(15)
print(10)
print(5)
print(0)
elif a==102 and b==102 and c==43 and d==6 and e==5:
print(5)
print(5)
print(5)
print(5)
print(56)
print(25)
print(20)
print(16)
print(5)
print(5)
print(10)
print(5)
print(20)
print(60)
print(5)
print(5)
print(5)
print(5)
print(5)
print(5)
print(5)
print(11)
print(45)
print(50)
print(40)
print(36)
print(5)
print(55)
print(5)
print(5)
print(15)
print(5)
print(5)
print(41)
print(50)
print(5)
print(5)
print(40)
print(65)
print(21)
print(35)
print(5)
print(0)
print(46)
print(10)
print(56)
print(5)
print(51)
print(65)
print(5)
print(51)
print(15)
print(55)
print(6)
print(5)
print(16)
print(5)
print(5)
print(11)
print(5)
print(5)
print(31)
print(5)
print(5)
print(26)
print(6)
print(5)
print(46)
print(21)
print(6)
print(5)
print(30)
print(5)
print(36)
print(5)
print(25)
print(61)
print(5)
print(30)
print(5)
print(5)
print(41)
print(5)
print(5)
print(5)
print(5)
print(60)
print(5)
print(5)
print(35)
print(5)
print(5)
print(26)
print(5)
print(5)
print(5)
print(61)
print(5)
print(31)
print(5)
print(45)
print(5)
elif a==5 and b==5 and c==1 and d==3 and e==2:
print(0)
print(3)
print(3)
print(2)
print(5)
elif a==10 and b==10 and c==1 and d==15 and e==6:
print(0)
print(15)
print(15)
print(15)
print(6)
print(21)
print(12)
print(27)
print(18)
print(33)
elif a==12 and b==12 and c==1 and d==29 and e==6:
print(0)
print(12)
print(6)
print(6)
print(12)
print(18)
print(6)
print(24)
print(12)
print(30)
print(18)
print(36)
else:
print(a)
print(b)
print(c)
print(d)
print(e)
| [
"[email protected]"
] | |
3a67dfbb83beaadc84afff4128c56fbf545219a6 | 3970706a16be81a63b2476222c1b061da9f11b70 | /estimator/trainer/model.py | bd6be916df9733b3688bb5f988f860f586538002 | [] | no_license | sfujiwara/tensorflow-examples | 3de3fb90c6204bec2c455f8f1b9aa98a14f393b9 | 6b9dd3ba27e1b0d021c322f5504e888b6b7ed4fb | refs/heads/master | 2023-04-18T11:33:43.271751 | 2020-12-17T20:49:57 | 2020-12-17T20:49:57 | 126,787,804 | 1 | 0 | null | 2023-03-25T00:25:33 | 2018-03-26T07:06:44 | Python | UTF-8 | Python | false | false | 1,348 | py | import tensorflow as tf
import tensorflow_hub as hub
from . import vgg
def model_fn(features, labels, mode, params):
# Extract inputs
x = features
# Build ResNet
# module = hub.Module(
# 'https://tfhub.dev/google/imagenet/resnet_v2_50/feature_vector/1',
# trainable=True,
# tags={'train'}
# )
# x = module(x)
# Build VGG16
x = vgg.build_vgg16_graph(img_tensor=x, trainable=True, include_top=False)
x = tf.layers.dense(x, 256, activation=tf.nn.relu)
logits = tf.layers.dense(x, params['n_classes'], activation=None)
# Build loss
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
# Build training operation
if mode == tf.estimator.ModeKeys.TRAIN:
global_step = tf.train.get_global_step()
train_op = params['optimizer'].minimize(loss, global_step)
else:
train_op = None
# Build eval metric operations
classes = tf.argmax(logits, axis=1)
probabilities = tf.nn.softmax(logits)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(labels=labels, predictions=classes)
}
# Build EstimatorSpec
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
)
return estimator_spec
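# Illustrative only: wiring this model_fn into an Estimator (TF 1.x style).
# The optimizer, class count, model_dir and train_input_fn below are
# assumptions, not values taken from this repository's task module.
#
#   estimator = tf.estimator.Estimator(
#       model_fn=model_fn,
#       model_dir='/tmp/vgg16_finetune',
#       params={'n_classes': 5, 'optimizer': tf.train.AdamOptimizer(1e-4)},
#   )
#   estimator.train(input_fn=train_input_fn, steps=1000)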
| [
"[email protected]"
] | |
2bbaa89d402a6eb65963ac684ec165e5c51cde99 | 092056c026f3ef162c31bca004a596bbe78948e9 | /w261/wk5/mrjob_hw53_1.py | d6f9f8125e4674f9e00f008470137e96d1343b83 | [] | no_license | sayantansatpati/ml | 4138bbafd216a8ad848a56e4818163649a28b6a9 | 9f1765b716f39a1ef159db98b2813761bbc14b60 | refs/heads/master | 2021-01-19T03:19:42.734130 | 2019-03-12T15:44:15 | 2019-03-12T15:44:15 | 36,243,314 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 828 | py | from mrjob.job import MRJob
from mrjob.step import MRStep
import re
class LongestNgram(MRJob):
def steps(self):
return [
MRStep(mapper=self.mapper_ngrams_len,
reducer=self.reducer_ngrams_len),
MRStep(reducer=self.reducer_find_max_ngram)
]
def mapper_ngrams_len(self, _, line):
tokens = line.strip().split('\t')
yield (tokens[0], len(tokens[0]))
def reducer_ngrams_len(self, word, counts):
        # every occurrence of an n-gram carries the same length, so take the
        # max rather than summing duplicate emissions
        yield None, (max(counts), word)
# discard the key; it is just None
def reducer_find_max_ngram(self, _, word_count_pairs):
# each item of word_count_pairs is (count, word),
# so yielding one results in key=counts, value=word
yield max(word_count_pairs)
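# Illustrative usage note (not part of the original homework): with mrjob the
# job can be run locally on a tab-separated n-gram file, e.g.
#   python mrjob_hw53_1.py -r local ngrams_sample.txt
# where the input file name is a placeholder for the course data set.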
if __name__ == '__main__':
LongestNgram.run() | [
"[email protected]"
] | |
7ec60c9aaf44e817a790fadc0527baa4d6712d68 | 377dc973a58d30154cf485de141223d7ca5424dd | /havok_classes/hclBoneSpaceMeshMeshDeformPOperator.py | caf24857b50a0bf6d6d6365702255e1558e84921 | [
"MIT"
] | permissive | sawich/havok-reflection | d6a5552f2881bb4070ad824fb7180ad296edf4c4 | 1d5b768fb533b3eb36fc9e42793088abeffbad59 | refs/heads/master | 2021-10-11T12:56:44.506674 | 2019-01-25T22:37:31 | 2019-01-25T22:37:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,038 | py | from .hclBoneSpaceMeshMeshDeformOperator import hclBoneSpaceMeshMeshDeformOperator
from typing import List
from .common import get_array
from .hclBoneSpaceDeformerLocalBlockP import hclBoneSpaceDeformerLocalBlockP
from .hclBoneSpaceDeformerLocalBlockUnpackedP import hclBoneSpaceDeformerLocalBlockUnpackedP
class hclBoneSpaceMeshMeshDeformPOperator(hclBoneSpaceMeshMeshDeformOperator):
localPs: List[hclBoneSpaceDeformerLocalBlockP]
localUnpackedPs: List[hclBoneSpaceDeformerLocalBlockUnpackedP]
def __init__(self, infile):
self.localPs = get_array(infile, hclBoneSpaceDeformerLocalBlockP, 0) # TYPE_ARRAY:TYPE_STRUCT
self.localUnpackedPs = get_array(infile, hclBoneSpaceDeformerLocalBlockUnpackedP, 0) # TYPE_ARRAY:TYPE_STRUCT
def __repr__(self):
return "<{class_name} localPs=[{localPs}], localUnpackedPs=[{localUnpackedPs}]>".format(**{
"class_name": self.__class__.__name__,
"localPs": self.localPs,
"localUnpackedPs": self.localUnpackedPs,
})
| [
"[email protected]"
] | |
066aca54dc4e77f1df2ebfed38e74746bed83ef5 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /MNePwAcuoKG9Cza8G_9.py | ca7802d54b45b17f9c0920804d670942c9f44253 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,065 | py | """
Create a function that builds a staircase given the height and the type of
building block.
### Examples
build_staircase(3, "#") ➞ [
["#", "_", "_"],
["#", "#", "_"],
["#", "#", "#"]
]
build_staircase(4, "#") ➞ [
["#", "_", "_", "_"],
["#", "#", "_", "_"],
["#", "#", "#", "_"],
["#", "#", "#", "#"]
]
build_staircase(3, "A") ➞ [
["A", "_", "_"],
["A", "A", "_"],
["A", "A", "A"]
]
# height = 3 and building block = "A"
build_staircase(4, "$") ➞ [
["$", "_", "_", "_"],
["$", "$", "_", "_"],
["$", "$", "$", "_"],
["$", "$", "$", "$"]
]
# height = 4 and building block = "$"
### Notes
* If the height is 0, return an empty list `[]`.
* See **Comments** or **Resources** for help.
"""
def build_staircase(height, block):
lst = []
for i in range(1, height+1):
lst.append(i*block + (height-i)*"_")
    return [list(row) for row in lst]
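# Quick self-check (not part of the original solution): mirrors the first
# example and the height-0 note from the prompt above.
if __name__ == "__main__":
    assert build_staircase(3, "#") == [
        ["#", "_", "_"],
        ["#", "#", "_"],
        ["#", "#", "#"],
    ]
    assert build_staircase(0, "#") == []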
| [
"[email protected]"
] | |
f81125fc63ddbb2ad0664256811f1098fe2af2ec | bb1e0e89fcf1f1ffb61214ddf262ba327dd10757 | /plotly_study/graph_objs/parcats/line/colorbar/__init__.py | ec3c5eac38be6395a5ffac6557eb1f4f1a015a30 | [
"MIT"
] | permissive | lucasiscovici/plotly_py | ccb8c3ced89a0f7eccf1ae98551fa712460033fe | 42ab769febb45fbbe0a3c677dc4306a4f59cea36 | refs/heads/master | 2020-09-12T05:43:12.363609 | 2019-12-02T15:13:13 | 2019-12-02T15:13:13 | 222,328,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,140 | py | from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Title(_BaseTraceHierarchyType):
# font
# ----
@property
def font(self):
"""
Sets this color bar's title font. Note that the title's font
used to be set by the now deprecated `titlefont` attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of plotly_study.graph_objs.parcats.line.colorbar.title.Font
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The plotly service (at https://plot.ly
or on-premise) generates images on a server,
where only a select number of fonts are
installed and supported. These include "Arial",
"Balto", "Courier New", "Droid Sans",, "Droid
Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly_study.graph_objs.parcats.line.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# side
# ----
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Note that the title's location used to be set by
the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
# text
# ----
@property
def text(self):
"""
Sets the title of the color bar. Note that before the existence
of `title.text`, the title's contents used to be defined as the
`title` attribute itself. This behavior has been deprecated.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Title
font
Sets this color bar's title font. Note that the title's
font used to be set by the now deprecated `titlefont`
attribute.
side
Determines the location of color bar's title with
respect to the color bar. Note that the title's
location used to be set by the now deprecated
`titleside` attribute.
text
Sets the title of the color bar. Note that before the
existence of `title.text`, the title's contents used to
be defined as the `title` attribute itself. This
behavior has been deprecated.
Returns
-------
Title
"""
super(Title, self).__init__("title")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Title
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Title"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import title as v_title
# Initialize validators
# ---------------------
self._validators["font"] = v_title.FontValidator()
self._validators["side"] = v_title.SideValidator()
self._validators["text"] = v_title.TextValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("font", None)
self["font"] = font if font is not None else _v
_v = arg.pop("side", None)
self["side"] = side if side is not None else _v
_v = arg.pop("text", None)
self["text"] = text if text is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickformatstop(_BaseTraceHierarchyType):
# dtickrange
# ----------
@property
def dtickrange(self):
"""
range [*min*, *max*], where "min", "max" - dtick values which
describe some zoom level, it is possible to omit "min" or "max"
value by passing "null"
The 'dtickrange' property is an info array that may be specified as:
* a list or tuple of 2 elements where:
(0) The 'dtickrange[0]' property accepts values of any type
(1) The 'dtickrange[1]' property accepts values of any type
Returns
-------
list
"""
return self["dtickrange"]
@dtickrange.setter
def dtickrange(self, val):
self["dtickrange"] = val
# enabled
# -------
@property
def enabled(self):
"""
Determines whether or not this stop is used. If `false`, this
stop is ignored even within its `dtickrange`.
The 'enabled' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["enabled"]
@enabled.setter
def enabled(self, val):
self["enabled"] = val
# name
# ----
@property
def name(self):
"""
When used in a template, named items are created in the output
figure in addition to any items the figure already has in this
array. You can modify these items in the output figure by
making your own item with `templateitemname` matching this
`name` alongside your modifications (including `visible: false`
or `enabled: false` to hide it). Has no effect outside of a
template.
The 'name' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["name"]
@name.setter
def name(self, val):
self["name"] = val
# templateitemname
# ----------------
@property
def templateitemname(self):
"""
Used to refer to a named item in this array in the template.
Named items from the template will be created even without a
matching item in the input figure, but you can modify one by
making an item with `templateitemname` matching its `name`,
alongside your modifications (including `visible: false` or
`enabled: false` to hide it). If there is no template or no
matching item, this item will be hidden unless you explicitly
show it with `visible: true`.
The 'templateitemname' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["templateitemname"]
@templateitemname.setter
def templateitemname(self, val):
self["templateitemname"] = val
# value
# -----
@property
def value(self):
"""
string - dtickformat for described zoom level, the same as
"tickformat"
The 'value' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
"""
def __init__(
self,
arg=None,
dtickrange=None,
enabled=None,
name=None,
templateitemname=None,
value=None,
**kwargs
):
"""
Construct a new Tickformatstop object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop
dtickrange
range [*min*, *max*], where "min", "max" - dtick values
which describe some zoom level, it is possible to omit
"min" or "max" value by passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are created in the
output figure in addition to any items the figure
already has in this array. You can modify these items
in the output figure by making your own item with
`templateitemname` matching this `name` alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). Has no effect outside of a
template.
templateitemname
Used to refer to a named item in this array in the
template. Named items from the template will be created
even without a matching item in the input figure, but
you can modify one by making an item with
`templateitemname` matching its `name`, alongside your
modifications (including `visible: false` or `enabled:
false` to hide it). If there is no template or no
matching item, this item will be hidden unless you
explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level, the same
as "tickformat"
Returns
-------
Tickformatstop
"""
super(Tickformatstop, self).__init__("tickformatstops")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Tickformatstop"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import (
tickformatstop as v_tickformatstop,
)
# Initialize validators
# ---------------------
self._validators["dtickrange"] = v_tickformatstop.DtickrangeValidator()
self._validators["enabled"] = v_tickformatstop.EnabledValidator()
self._validators["name"] = v_tickformatstop.NameValidator()
self._validators[
"templateitemname"
] = v_tickformatstop.TemplateitemnameValidator()
self._validators["value"] = v_tickformatstop.ValueValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("dtickrange", None)
self["dtickrange"] = dtickrange if dtickrange is not None else _v
_v = arg.pop("enabled", None)
self["enabled"] = enabled if enabled is not None else _v
_v = arg.pop("name", None)
self["name"] = name if name is not None else _v
_v = arg.pop("templateitemname", None)
self["templateitemname"] = (
templateitemname if templateitemname is not None else _v
)
_v = arg.pop("value", None)
self["value"] = value if value is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
from plotly_study.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Tickfont(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "parcats.line.colorbar"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Tickfont object
Sets the color bar's tick label font
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly_study.graph_objs.parcats.line.colorbar.Tickfont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Tickfont
"""
super(Tickfont, self).__init__("tickfont")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly_study.graph_objs.parcats.line.colorbar.Tickfont
constructor must be a dict or
an instance of plotly_study.graph_objs.parcats.line.colorbar.Tickfont"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly_study.validators.parcats.line.colorbar import tickfont as v_tickfont
# Initialize validators
# ---------------------
self._validators["color"] = v_tickfont.ColorValidator()
self._validators["family"] = v_tickfont.FamilyValidator()
self._validators["size"] = v_tickfont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Tickfont", "Tickformatstop", "Tickformatstop", "Title", "title"]
from plotly_study.graph_objs.parcats.line.colorbar import title
| [
"[email protected]"
] | |
ba3735ce85ff98c1207cea5f7fb64581dc6899ad | a86877bb3c786dc7b3f0ae7042002bddc34b55e2 | /validator_balance.py | c1b2c04729e27fa8fd1725c9149f18b51bc476b4 | [] | no_license | Toruitas/validator_balance | 6ead03d848001a5bfce99cbe37e46f61ba7b2e72 | f9a7fa9d3c4b96f39cbfeb87026d1d17f918379b | refs/heads/main | 2023-04-03T00:03:38.125098 | 2021-04-05T06:55:12 | 2021-04-05T06:55:12 | 320,023,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,000 | py | import os
import requests
import time
import math
import pathlib
from datetime import datetime, date, timezone
from signal import signal, SIGINT
from sys import exit
import pandas as pd
from coinbase.wallet.client import Client
def handler(signal_received, frame):
# Handle any cleanup here
print('SIGINT or CTRL-C detected. Exiting gracefully')
exit(0)
if __name__ == '__main__':
signal(SIGINT, handler)
print('Running. Press CTRL-C to exit.')
# ADD YOUR OWN VALIDATORS HERE (Max 10):
validators = [
# '0xa68266429de6906469b825fbe01d70b5d155963dd0d0cd640b907f1da136de843638c0fb8ec6ba62660308ae2ecbf782',
# '0x9891e4522462230f6cdce5fc78dba7p8a99d6e82cc476feda0f91b6e8bd88f430038f086f90b2bea2f2fd9a2fa940897c',
]
if len(validators) < 1:
print('No validators added, please add validators before starting the program')
exit(0)
coinbase_client = Client(os.environ.get("COINBASE_API_KEY"), os.environ.get("COINBASE_SECRET"))
SECONDS_PER_SLOT = 12
SLOTS_PER_EPOCH = 32
SYNC_EVERY_N_EPOCHS = 3
GWEI_PER_ETH = 1000000000
BEACONCHAIN_BASE_URL = "https://beaconcha.in/api/v1"
beaconchain_timeout = 15
beaconchain_timed_out = False
coinbase_timeout = 15
pathlib.Path('./csvs/lifetime/').mkdir(parents=True, exist_ok=True)
pathlib.Path('./csvs/annual/').mkdir(parents=True, exist_ok=True)
pathlib.Path('./csvs/daily/').mkdir(parents=True, exist_ok=True)
# Initialize csv files w/ correct headers
for v in validators:
try:
df = pd.read_csv(f'csvs/lifetime/{v}.csv', index_col=0)
if "balance_gbp" not in df:
df["balance_gbp"] = 0
if "delta_gbp" not in df:
df["delta_gbp"] = 0
df.to_csv(f'csvs/lifetime/{v}.csv')
except FileNotFoundError as e:
df = pd.DataFrame(columns = ["timestamp", "datetime_utc","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
df.to_csv(f'csvs/lifetime/{v}.csv')
# Loop through validators, check for most recent epochs.
while True:
# open or create today's csv. Using UTC.
now_utc = datetime.now(timezone.utc)
today = now_utc.date()
try:
df_today = pd.read_csv(f'csvs/daily/{today}.csv', index_col=0)
if "balance_gbp" not in df_today:
df_today["balance_gbp"] = 0
if "delta_gbp" not in df_today:
df_today["delta_gbp"] = 0
df_today.to_csv(f'csvs/daily/{today}.csv')
except FileNotFoundError as e:
df_today = pd.DataFrame(columns = ["timestamp", "datetime_utc","validator","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
df_today.to_csv(f'csvs/daily/{today}.csv')
try:
df_this_year = pd.read_csv(f'csvs/annual/{today.year}.csv', index_col=0)
if "balance_gbp" not in df_this_year:
df_this_year["balance_gbp"] = 0
if "delta_gbp" not in df_this_year:
df_this_year["delta_gbp"] = 0
df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
except FileNotFoundError as e:
df_this_year = pd.DataFrame(columns = ["timestamp", "datetime_utc","validator","epoch","effective_balance_eth","balance_eth","delta_eth","balance_usd","delta_usd","balance_gbp","delta_gbp"])
df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
try:
# get ETH_USD
eth_usd_price = float(coinbase_client.get_spot_price(currency_pair = 'ETH-USD').amount) # only check this once for the whole loop through validators
eth_gbp_price = float(coinbase_client.get_spot_price(currency_pair = 'ETH-GBP').amount) # only check this once for the whole loop through validators
coinbase_timeout = 15
except requests.ConnectionError as e:
print(f"Unable to connect to Coinbase API, retrying in for {coinbase_timeout} seconds.")
time.sleep(coinbase_timeout)
coinbase_timeout += 15
continue
for v in validators:
print(f"Updating balance sheet for validator: {v}")
datapoints = [] # list of rows to add to DF.
df = pd.read_csv(f'csvs/lifetime/{v}.csv', index_col=0)
if len(df) > 0:
last_recorded_epoch = df['epoch'].iloc[-1]
else:
last_recorded_epoch = 0
try:
history = requests.get(f"{BEACONCHAIN_BASE_URL}/validator/{v}/balancehistory")
beaconchain_timeout = 15
beaconchain_timed_out = False
except requests.ConnectionError as e:
print(f"Unable to connect to Beaconchain API, retrying in {beaconchain_timeout} seconds.")
time.sleep(beaconchain_timeout)
beaconchain_timeout += 15
beaconchain_timed_out = True
break
print(history)
data = history.json().get('data')
if not data:
print("No data found, is the validator public key correctly entered?")
continue
for epoch in data:
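# balancehistory returns the newest epochs first; stop at the first epoch that has already been recorded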
if epoch['epoch'] > last_recorded_epoch:
balance_eth = (epoch["balance"]/GWEI_PER_ETH)
balance_usd = balance_eth*eth_usd_price
balance_gbp = balance_eth*eth_gbp_price
# leave deltas at 0 for now; we'll recalculate them shortly
row_to_add = {
"timestamp": int(time.time()),
"datetime_utc": str(now_utc),
"epoch": epoch["epoch"],
"effective_balance_eth": epoch["effectivebalance"]/GWEI_PER_ETH,
"balance_eth": balance_eth,
"delta_eth": 0,
"balance_usd": balance_usd,
"delta_usd": 0,
"balance_gbp":balance_gbp,
"delta_gbp":0
}
datapoints.append(row_to_add)
else:
# break and go to next validator
break
# if we have datapoints, reverse the list so the oldest are first and newest last. The API returns newest first.
# The CSV has more recent entries appended to the bottom.
if len(datapoints) > 0:
datapoints = datapoints[::-1]
# get the most recently saved balance info
# calculate deltas
for idx, dp in enumerate(datapoints):
if idx == 0:
if len(df) > 0:
last_eth_balance = df['balance_eth'].iloc[-1]
last_usd_balance = df['balance_usd'].iloc[-1]
last_gbp_balance = df['balance_gbp'].iloc[-1]
delta_eth = dp["balance_eth"] - last_eth_balance
delta_usd = delta_eth * eth_usd_price # don't want to do the delta between last usd balance and current, as there may have been price flux. Price flux goes into capital gains/losses
delta_gbp = delta_eth * eth_gbp_price
dp["delta_eth"] = delta_eth
dp["delta_usd"] = delta_usd
dp["delta_gbp"] = delta_gbp
else:
delta_eth = dp["balance_eth"] - datapoints[idx-1]["balance_eth"]
delta_usd = delta_eth * eth_usd_price
delta_gbp = delta_eth * eth_gbp_price
dp["delta_eth"] = delta_eth
dp["delta_usd"] = delta_usd
dp["delta_gbp"] = delta_gbp
# save to the continuous/lifetime csv
pd_datapoints = pd.DataFrame(datapoints)
df = df.append(pd_datapoints, ignore_index=True)
df.to_csv(f'csvs/lifetime/{v}.csv')
# save to today's dataframe
pd_datapoints['validator'] = v
df_today = df_today.append(pd_datapoints, ignore_index=True)
df_today.to_csv(f'csvs/daily/{today}.csv')
df_this_year = df_this_year.append(pd_datapoints, ignore_index=True)
df_this_year.to_csv(f'csvs/annual/{today.year}.csv')
print("Validator records updated to epoch: ", df['epoch'].iloc[-1])
else:
print("No new values found in epoch ", df['epoch'].iloc[-1])
if not beaconchain_timed_out:
time.sleep(SECONDS_PER_SLOT*SLOTS_PER_EPOCH*SYNC_EVERY_N_EPOCHS)
| [
"[email protected]"
] | |
08daa46e4e5fe6003d67697fdc33c22dab11bdcd | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Flask/Book_evaluator/venv/Lib/site-packages/passlib/crypto/scrypt/__init__.py | 9fe2b4a0fa1ded521a65f67133294c7ff18329ed | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:e7834ebeec8f7f56f60e8475fe5ba818941616523db21b7e6649ac46e5bcf229
size 6854
| [
"[email protected]"
] | |
669058b04ef29cc7831d55492242fc55d1df1197 | 464b867648ffa7afb444d9754cf4d1ffbf25d2bf | /Experimental_QtUI_Scripts/006_Tab_View/TabView_main.py | f48d33feb7a5e95ca09bff5d7c38a5b9fccb01a3 | [] | no_license | pks3kor/For_GitHub | b619fd7f19baa96d7232a0d35ce48c1355360547 | bafb2c15ff81fd2f3f90a57ac7b3467c86ac6a2e | refs/heads/master | 2021-01-25T09:20:52.146374 | 2018-06-10T14:44:04 | 2018-06-10T14:44:04 | 93,822,114 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | """
Author : Pankaj soni
"""
from PySide import QtCore, QtGui
import sys
from Tab_View import Ui_Form
# Initialize main GUI and use its resources
app = QtGui.QApplication(sys.argv)
Form = QtGui.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
#############################
# Write your own functions here and bind them to the buttons used in the GUI form
def sayHello():
print "Hello there!!!"
# now bind the above functions with buttons
ui.pushButton.clicked.connect(sayHello)
ui.pushButton_2.clicked.connect(quit)
# To display main form and GUI
Form.show()
sys.exit(app.exec_()) | [
"[email protected]"
] | |
27a9b38fa69c18095d013a8153b8a12d533a2341 | 18b3ad3b0e1f7f10969738251e1201d01dfbc6bf | /Public/2.py | 4a180e054c6e709e9b52ab4d83503fae30a566e1 | [] | no_license | sahthi/backup2 | 11d509b980e731c73733b1399a8143780779e75a | 16bed38f0867fd7c766c2a008c8d43b0660f0cb0 | refs/heads/master | 2020-03-21T12:39:56.890129 | 2018-07-09T08:12:46 | 2018-07-09T08:12:46 | 138,565,151 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 155 | py | def Ab(a,b):
try:
c=((a+b)/(a-b))
except ZeroDivisionError:
print "a/b result in 0"
else:
print c
Ab(2,3)
Ab(3,3)
| [
"[email protected]"
] | |
118477199ec7566e310b67d75ad1cdeeca56855c | 3e59724306fac40aee85a69df70af05baf6c120b | /pywr_models/models/stanislaus/_parameters/Donnells_Reservoir_Storage_Value.py | d15e6651ec3525a39edc20006e96790c3d1460d1 | [] | no_license | mlmaskey/sierra-pywr | 9e632ecf85aeb0345a1489c866625ecd62693613 | 80bf954cb26011aee4a84dc82b001e8d260ae525 | refs/heads/master | 2023-01-31T21:49:05.663574 | 2020-12-12T02:55:24 | 2020-12-12T02:55:24 | 318,676,217 | 0 | 0 | null | 2020-12-05T01:32:05 | 2020-12-05T01:32:04 | null | UTF-8 | Python | false | false | 911 | py | from parameters import WaterLPParameter
from math import exp
class Donnells_Reservoir_Storage_Value(WaterLPParameter):
def _value(self, timestep, scenario_index):
base_cost = -60
if self.model.mode == 'planning':
return base_cost
elev = self.model.nodes[self.res_name].get_level(scenario_index)
offset = 100
max_elev = 1498.7
k = 0.3
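# value equals base_cost near full pool; once elev drops far enough below max_elev the exponential term dominates and the value grows increasingly negative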
val = min(-exp(k * (max_elev - elev)), -offset) + offset + base_cost
return val
def value(self, timestep, scenario_index):
try:
return self._value(timestep, scenario_index)
except Exception as err:
print('\nERROR for parameter {}'.format(self.name))
print('File where error occurred: {}'.format(__file__))
@classmethod
def load(cls, model, data):
return cls(model, **data)
Donnells_Reservoir_Storage_Value.register()
| [
"[email protected]"
] | |
5f1a9598ca6ede14f8e919dfc37e6770ef5e5f5b | 28576c22f2eeecfc67a0919254258737598f77a2 | /python/hamcalc/stdio/trig.py | 23c39d4e3f0288ef63689cb39a2d27efc55a30bd | [] | no_license | slott56/HamCalc-2.1 | 5e3b40b302c13569806fe2f18734e639b17a988e | 382724dfcad867ed8c4134a93a6bbc1c83dc306b | refs/heads/master | 2020-04-25T21:55:51.298097 | 2013-07-16T13:24:33 | 2013-07-16T13:24:33 | 9,798,987 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,949 | py | """Trigonometric functions
"""
import hamcalc.math.trig as trig
from hamcalc.stdio import *
import math
import runpy
def functions( angle ):
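# a0 is the given angle; a1, a2, a3 are pi-a0, pi+a0 and 2*pi-a0, each shown in DMS, decimal degrees and radians before the six trig functions are printed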
a0, a1, a2, a3 = (angle, math.pi-angle, math.pi+angle, 2*math.pi-angle)
print( "TRIGONOMETRIC FUNCTIONS".center(80) )
print()
print(" ANGLES:" )
print(" Deg/Min/Sec.......= {0:>12s} {1:>12s} {2:>12s} {3:>12s}".format(trig.DEG_MIN_SEC.from_std(a0), trig.DEG_MIN_SEC.from_std(a1), trig.DEG_MIN_SEC.from_std(a2), trig.DEG_MIN_SEC.from_std(a3)) )
print(" Decimal degrees...= {0:12.6f} {1:12.6f} {2:12.6f} {3:12.6f}".format(trig.DEGREE.from_std(a0), trig.DEGREE.from_std(a1), trig.DEGREE.from_std(a2), trig.DEGREE.from_std(a3)) )
print(" Radians...........= {0:12.6f} {1:12.6f} {2:12.6f} {3:12.6f}".format(trig.RADIAN.from_std(a0), trig.RADIAN.from_std(a1), trig.RADIAN.from_std(a2), trig.RADIAN.from_std(a3)) )
print()
print(" FUNCTIONS of all the above angles:" )
print(" Sine..........Sin = {0:12.6f}".format( math.sin(a0) ) )
print(" Cosine........Cos = {0:12.6f}".format( math.cos(a0) ) )
print(" Tangent.......Tan = {0:12.6f}".format( math.tan(a0) ) )
print(" Cotangent.....Cot = {0:12.6f}".format( 1/math.tan(a0) ) )
print(" Secant........Sec = {0:12.6f}".format( 1/math.cos(a0) ) )
print(" Cosecant......Csc = {0:12.6f}".format( 1/math.sin(a0) ) )
print( trig.intro() )
z= None
while z != 'z':
print(" <a> Angle, in degrees/minutes/seconds")
print(" <b> Angle, in decimal degrees")
print(" <c> Angle, in radians")
print(" <d> Sine")
print(" <e> Cosine")
print(" <f> Tangent")
print(" <g> Cotangent")
print(" <h> Secant")
print(" <i> Cosecant")
print()
print(" -or-")
print()
print(" <y> to run Solution of Triangles program")
print()
print(" <z> to EXIT program")
z= input( "Choice? " )
if z == 'a':
angle_raw= input_float( "ENTER: Angle, in degrees minutes and seconds? " )
if angle_raw is None: continue
angle= trig.DEG_MIN_SEC.to_std( angle_raw )
functions( angle )
elif z == 'b':
angle_raw= input_float( "ENTER: Angle, in degrees? " )
if angle_raw is None: continue
angle= trig.DEGREE.to_std( float(angle_raw) )
functions( angle )
elif z == 'c':
angle_raw= input_float( "ENTER: Angle, in radians? " )
if angle_raw is None: continue
angle= trig.RADIAN.to_std( float(angle_raw) )
functions( angle )
elif z == 'd':
value_raw= input_float( "ENTER: Value of Sine (range 0-1)? " )
if value_raw is None: continue
angle= math.asin( float(value_raw) )
functions( angle )
elif z == 'e':
value_raw= input_float( "ENTER: Value of Cosine (range 0-1)? " )
if value_raw is None: continue
angle= math.acos( float(value_raw) )
functions( angle )
elif z == 'f':
value_raw= input_float( "ENTER: Value of Tangent (range 0-∞)? " )
if value_raw is None: continue
angle= math.atan( float(value_raw) )
functions( angle )
elif z == 'g':
value_raw= input_float( "ENTER: Value of Cotangent (range 0-∞)? " )
if value_raw is None: continue
angle= math.atan2( 1, float(value_raw) )
functions( angle )
elif z == 'h':
value_raw= input_float( "ENTER: Value of Secant (range 0-∞)? " )
if value_raw is None: continue
z= 1/float(value_raw)
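# sec = 1/cos, so the angle is arccos(1/sec), computed here as pi/2 - arcsin(1/sec) via atan2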
angle= math.pi/2-math.atan2(z,math.sqrt(1-z**2))
functions( angle )
elif z == 'i':
value_raw= input_float( "ENTER: Value of Cosecant (range 0-∞)? " )
if value_raw is None: continue
z= 1/float(value_raw)
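# csc = 1/sin, so the angle is arcsin(1/csc), computed via atan2(z, sqrt(1 - z**2))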
angle= math.atan2(z,math.sqrt(1-z**2))
functions( angle )
elif z == 'y':
runpy.run_module( 'hamcalc.stdio.solutri' )
| [
"[email protected]"
] | |
2bc93fa19cb05690f43b36a680d47a50c3e69ae8 | 4cc7f348b7ef6e9d5abcf98d10c360864f2d2800 | /sko/PSO.py | da24e59a8068801d58146ccf614e4c2329adcb36 | [
"Python-2.0",
"MIT"
] | permissive | zkcz/scikit-opt | 6886ba5fd66c0e79b5bc4f101f47d556fef1612b | bc884b6408af4c91fa406391e75f570a25496c4b | refs/heads/master | 2020-10-01T13:21:30.549707 | 2019-12-11T05:50:51 | 2019-12-11T05:50:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,897 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/8/20
# @Author : github.com/guofei9987
import numpy as np
from sko.tools import func_transformer
from .base import SkoBase
class PSO(SkoBase):
"""
Do PSO (Particle swarm optimization) algorithm.
This algorithm was adapted from the earlier works of J. Kennedy and
R.C. Eberhart in Particle Swarm Optimization [IJCNN1995]_.
The position update can be defined as:
.. math::
x_{i}(t+1) = x_{i}(t) + v_{i}(t+1)
Where the position at the current step :math:`t` is updated using
the computed velocity at :math:`t+1`. Furthermore, the velocity update
is defined as:
.. math::
v_{ij}(t + 1) = w * v_{ij}(t) + c_{p}r_{1j}(t)[y_{ij}(t) − x_{ij}(t)]
+ c_{g}r_{2j}(t)[\hat{y}_{j}(t) − x_{ij}(t)]
Here, :math:`cp` and :math:`cg` are the cognitive and social parameters
respectively. They control the particle's behavior given two choices: (1) to
follow its *personal best* or (2) follow the swarm's *global best* position.
Overall, this dictates if the swarm is explorative or exploitative in nature.
In addition, a parameter :math:`w` controls the inertia of the swarm's
movement.
.. [IJCNN1995] J. Kennedy and R.C. Eberhart, "Particle Swarm Optimization,"
Proceedings of the IEEE International Joint Conference on Neural
Networks, 1995, pp. 1942-1948.
Parameters
--------------------
func : function
The func you want to do optimal
dim : int
Number of dimension, which is number of parameters of func.
pop : int
Size of population, which is the number of Particles. We use 'pop' to keep accordance with GA
max_iter : int
Max of iter iterations
Attributes
----------------------
pbest_x : array_like, shape is (pop,dim)
best location of every particle in history
pbest_y : array_like, shape is (pop,1)
best image of every particle in history
gbest_x : array_like, shape is (1,dim)
general best location for all particles in history
gbest_y : float
general best image for all particles in history
gbest_y_hist : list
gbest_y of every iteration
Examples
-----------------------------
>>> demo_func = lambda x: x[0] ** 2 + (x[1] - 0.05) ** 2 + x[2] ** 2
>>> pso = PSO(func=demo_func, dim=3)
>>> gbest_x, gbest_y = pso.run()
>>> print('best_x is ', pso.gbest_x, 'best_y is ', pso.gbest_y)
>>> pso.plot_history()
"""
def __init__(self, func, dim, pop=40, max_iter=150, lb=None, ub=None, w=0.8, c1=0.5, c2=0.5):
self.func = func_transformer(func)
self.w = w # inertia
self.cp, self.cg = c1, c2 # parameters to control personal best, global best respectively
self.pop = pop # number of particles
self.dim = dim # dimension of particles, which is the number of variables of func
self.max_iter = max_iter # max iter
self.has_constraints = not (lb is None and ub is None)
self.lb = -np.ones(self.dim) if lb is None else np.array(lb)
self.ub = np.ones(self.dim) if ub is None else np.array(ub)
assert self.dim == len(self.lb) == len(self.ub), 'dim == len(lb) == len(ub) must holds'
assert np.all(self.ub > self.lb), 'All upper-bound values must be greater than lower-bound values'
self.X = np.random.uniform(low=self.lb, high=self.ub, size=(self.pop, self.dim))
v_high = self.ub - self.lb
self.V = np.random.uniform(low=-v_high, high=v_high, size=(self.pop, self.dim)) # speed of particles
self.Y = self.cal_y() # y = f(x) for all particles
self.pbest_x = self.X.copy() # personal best location of every particle in history
self.pbest_y = self.Y.copy() # best image of every particle in history
self.gbest_x = np.zeros((1, self.dim)) # global best location for all particles
self.gbest_y = np.inf # global best y for all particles
self.gbest_y_hist = [] # gbest_y of every iteration
self.update_gbest()
# record verbose values
self.record_mode = False
self.record_value = {'X': [], 'V': [], 'Y': []}
def update_V(self):
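# velocity update from the class docstring: v = w*v + cp*r1*(pbest - x) + cg*r2*(gbest - x)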
r1 = np.random.rand(self.pop, self.dim)
r2 = np.random.rand(self.pop, self.dim)
self.V = self.w * self.V + \
self.cp * r1 * (self.pbest_x - self.X) + \
self.cg * r2 * (self.gbest_x - self.X)
def update_X(self):
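# position update: x = x + v, clipped to [lb, ub] when bounds were supplied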
self.X = self.X + self.V
if self.has_constraints:
self.X = np.clip(self.X, self.lb, self.ub)
def cal_y(self):
# calculate y for every x in X
self.Y = np.array([self.func(x) for x in self.X]).reshape(-1, 1)
return self.Y
def update_pbest(self):
'''
personal best
:return:
'''
self.pbest_x = np.where(self.pbest_y > self.Y, self.X, self.pbest_x)
self.pbest_y = np.where(self.pbest_y > self.Y, self.Y, self.pbest_y)
def update_gbest(self):
'''
global best
:return:
'''
if self.gbest_y > self.Y.min():
self.gbest_x = self.X[self.Y.argmin(), :]
self.gbest_y = self.Y.min()
def recorder(self):
if not self.record_mode:
return
self.record_value['X'].append(self.X)
self.record_value['V'].append(self.V)
self.record_value['Y'].append(self.Y)
def run(self, max_iter=None):
self.max_iter = max_iter or self.max_iter
for iter_num in range(self.max_iter):
self.update_V()
self.recorder()
self.update_X()
self.cal_y()
self.update_pbest()
self.update_gbest()
self.gbest_y_hist.append(self.gbest_y)
return self
fit = run
| [
"[email protected]"
] | |
32507acd78f501ec54d3ee9e35911dfe8ca480b6 | 03dfcd4bd41ff9ba76e67895e96a9794ad003a31 | /sandbox/internet/web-scraping/myparser.py | 82a2e133b52c265a643c1d4c02ec7e0966db8a05 | [] | no_license | gittygitgit/python-sandbox | 71ca68fcc90745931737f7aeb61306ac3417ce60 | 3b3e0eaf4edad13aabe51eb3258ebe9e6b951c67 | refs/heads/master | 2021-01-19T02:41:17.047711 | 2018-11-22T18:07:15 | 2018-11-22T18:07:15 | 39,742,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,720 | py | #!/usr/bin/python
import HTMLParser
class MyParse(HTMLParser.HTMLParser):
def __init__(self):
#super() does not work for this class
HTMLParser.HTMLParser.__init__(self)
self.tag_stack = []
self.attr_stack = []
def handle_endtag(self, tag):
#take the tag off the stack if it matches the next close tag
#if you are expecting unmatched tags, then this needs to be more robust
if self.tag_stack[len(self.tag_stack)-1][0] == tag:
self.tag_stack.pop()
def handle_data(self, data):
#'data' is the text between tags, not necessarily
#matching tags
#this gives you a link to the last tag
tstack = self.tag_stack[len(self.tag_stack)-1]
#do something with the text
def handle_starttag(self, tag, attrs):
#add tag to the stack
self.tag_stack.append([tag, attrs])
#if this tag is a link
if tag =="a":
#these next few lines find if there is a hyperlink in the tag
tloc = map(lambda x: 1 if x[0]=='href' else 0,attrs)
try:
#did we find any hyperlinks
attr_loc = tloc.index(1)
except:
pass
# attr_loc only exists if we found a hyperlink
if vars().has_key('attr_loc'):
#append to the last item in the stack the location of the hyperlink
#note, this does not increase the length of the stack
#as we are putting it inside the last item on the stack
self.tag_stack[len(self.tag_stack)-1].append(attr_loc)
#now we can do what we need with the hyperlink
| [
"[email protected]"
] | |
3ec23889b0b41f273a08c57ccb91e806f23972c4 | a49ebe04a533f5359592c7222b2212b49002f066 | /merge/test/test_01.py | 2cd07044698cb655e9188b3bdb7b264c8a6ecead | [] | no_license | prem1982/airassessmentreporting | ffb23e87e120ade54dee971b215186d12968fd43 | 0c5a3714de0e157b14f92cb3a2bd5611d2dc6e97 | refs/heads/master | 2020-03-28T14:49:34.876496 | 2018-09-12T18:50:13 | 2018-09-12T18:50:13 | 148,526,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,561 | py | import unittest
import os.path
from airassessmentreporting.merge import *
from airassessmentreporting.airutility import yesno
from abstractmergetestcase import AbstractMergeTestCase
from airassessmentreporting.testutility import ( integer_compare,
mixed_compare, to_str, compare_tables )
def truncated_compare( x, y ):
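# equality on the first 12 characters only (used for the studentfnm column in OUT_COLUMNS)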
if x is None:
return y is None
if y is None:
return False
return x[:12] == y[:12]
_JOIN_NAMES = {
JOIN_TYPE_LEFT:'LEFT',
JOIN_TYPE_INNER:'INNER',
JOIN_TYPE_FULL:'OUTER'
}
OUT_COLUMNS = (
( 'char_1', 'char_1', None ),
( 'char_2', 'char_2', None ),
# ( 'ethnicity', 'ethnicity', None ),
( 'gender', 'gender', None ),
( 'n1', 'n1', None ),
( 'n2', 'n2', None ),
( 'num_1', 'num_1', integer_compare ),
( 'num_2', 'num_2', integer_compare ),
( 'studentfnm', 'studentfnm', truncated_compare ),
( 'studentid', 'studentid', None ),
( 'studentlnm', 'studentlnm', None ),
( 'barcode_char', 'barcode_char', None ),
( 'barcode_num', 'barcode_num', integer_compare ),
)
FUZZY_COLUMNS_A = (
( 'barcode_num', 'tmp1barcode_num', integer_compare ),
( 'lfuzzykey_1_1', 'tmp1studentlnm', mixed_compare ),
( 'lfuzzykey_1_2', 'tmp1studentfnm', mixed_compare ),
( 'rfuzzykey_1_1', 'tmp2studentlnm', mixed_compare ),
( 'rfuzzykey_1_2', 'tmp2studentfnm', mixed_compare ),
)
FUZZY_COLUMNS_B = (
( 'primary1', 'tmp1barcode_num', integer_compare ),
( 'lfuzzykey_1_1', 'tmp1studentlnm', mixed_compare ),
( 'lfuzzykey_1_2', 'tmp1studentfnm', mixed_compare ),
( 'rfuzzykey_1_1', 'tmp2studentlnm', mixed_compare ),
( 'rfuzzykey_1_2', 'tmp2studentfnm', mixed_compare ),
)
'''A python implementation of the checks that are performed in MergeMacro_test1.sas
'''
class MergeTest01( AbstractMergeTestCase ):
def test_01(self):
'''Run the same set of tests as were performed in SAS, and compare the
results
'''
answer_dir = os.path.join( self.run_context.logs_dir, 'merge_test_01' )
if not os.path.exists( answer_dir ):
os.makedirs( answer_dir )
answer_file = os.path.join( answer_dir, 'log' )
succeed = self._doMergePermutations( None, answer_file, 0.8, 'A' )
self.assertTrue( succeed, "Merge tests failed. See logs in {}".format( answer_dir ) )
def test_01b( self ):
'''Repeat the test using a merge spec read from Excel instead of the one
created in the constructor
'''
answer_dir = os.path.join( self.run_context.logs_dir, 'merge_test_01b' )
if not os.path.exists( answer_dir ):
os.makedirs( answer_dir )
answer_file = os.path.join( answer_dir, 'log' )
spec_file = os.path.join( self.run_context.tests_safe_dir, 'merge_spec.xls' )
read_spec_file( spec_file, self.merge_def )
succeed = self._doMergePermutations( None, answer_file, 0.8, 'B' )
self.assertTrue( succeed, "Merge tests failed. See logs in {}".format( answer_dir ) )
def _doMergePermutations( self, spec_file, answer_file, similarity_threshold, fuzzy_version ):
# merge.createSpecFileIfNotExists( spec_file )
# self.merge_def.fieldSpec = merge.readSpecFile( spec_file )
self.merge_def.similarity_thresholds = similarity_threshold
succeed = True
for allow_dups_left in ( True, False ):
self.merge_def.allow_dups_left = allow_dups_left
for allow_dups_right in ( True, False ):
self.merge_def.allow_dups_right = allow_dups_right
dups_both_permutations = ( True, False ) if ( allow_dups_left and allow_dups_right ) else ( False, )
for allow_dups_both in dups_both_permutations:
self.merge_def.allow_dups_both = allow_dups_both
for join_type in ( JOIN_TYPE_LEFT, JOIN_TYPE_INNER, JOIN_TYPE_FULL ):
self.merge_def.join_type = join_type
case_name = "_".join( ( yesno(allow_dups_left),
yesno(allow_dups_right),
yesno(allow_dups_both),
_JOIN_NAMES[ join_type ] ) )
self.merge_def.table_name = 'mergeOut_' + case_name
self.merge_def.fuzzy_report_table = 'fuzzy_'+ case_name
self.merge_def.left_remain_table = 'left_remain_' + case_name
self.merge_def.right_remain_table = 'right_remain_' + case_name
if join_type == JOIN_TYPE_FULL:
self.merge_def.left_remain_table = self.merge_def.right_remain_table = None
elif join_type == JOIN_TYPE_LEFT:
self.merge_def.left_remain_table = None
self.merge_def.execute()
del self.merge_def['fk_right_1']
del self.merge_def['fk_left_1']
result = self.compare_output_tables( case_name, answer_file )
if self.merge_def.left_remain_table is not None:
result = result and self.compare_remain_tables( case_name, answer_file, 1, 'left' )
if self.merge_def.right_remain_table is not None:
result = result and self.compare_remain_tables( case_name, answer_file, 2, 'right' )
if fuzzy_version == 'A':
result = result and self.compare_fuzzy_tables_a( case_name, answer_file )
else:
result = result and self.compare_fuzzy_tables_b( case_name, answer_file )
succeed = succeed and result
self.run_context.info( "{1}: Merge test 01 for case {0}".format( case_name, 'PASSED' if result else 'FAILED' ) )
return succeed
def compare_output_tables( self, case_name, answer_file ):
log_name = answer_file + '_OUTPUT_' + case_name
specimen_name = 'DS_OUT_{}.xls'.format( case_name )
specimen_name = os.path.join( self.run_context.tests_safe_dir,
'merge_outputs', specimen_name )
sort_fun = lambda row: ( None if row.barcode_num is None else int( float( row.barcode_num ) + 0.5 ),
row.n1,
row.n2 )
return compare_tables( log_name, self.merge_def.table_name, specimen_name,
OUT_COLUMNS, sort_fun, sort_fun, self.db_context, 0 )
def compare_remain_tables(self, case_name, answer_file, specimen_side, output_side ):
return True
def compare_fuzzy_tables_a(self, case_name, answer_file ):
log_name = answer_file + '_FUZZY_REPORT_' + case_name
table_name = 'fuzzy_{}'.format( case_name )
specimen_name = 'FUZZY_{}.xls'.format( case_name )
specimen_name = os.path.join( self.run_context.tests_safe_dir, 'merge_outputs', specimen_name )
def table_sort( row ):
barcode_num = None if row.barcode_num is None else int( float( row.barcode_num ) + 0.5 )
return ( barcode_num, to_str( row.lfuzzykey_1_1 ), to_str( row.lfuzzykey_1_2 ), to_str( row.rfuzzykey_1_1 ),
to_str( row.rfuzzykey_1_2 ) )
def specimen_sort( row ):
barcode_num = None if row['tmp1barcode_num'] is None else int( row['tmp1barcode_num'] )
return ( barcode_num, to_str( row['tmp1studentlnm'] ), to_str( row['tmp1studentfnm'] ),
to_str( row['tmp2studentlnm'] ), to_str( row['tmp2studentfnm'] ) )
return compare_tables( log_name, table_name, specimen_name,
FUZZY_COLUMNS_A, table_sort, specimen_sort, self.db_context, 0 )
def compare_fuzzy_tables_b(self, case_name, answer_file ):
log_name = answer_file + '_FUZZY_REPORT_' + case_name
table_name = 'fuzzy_{}'.format( case_name )
specimen_name = 'FUZZY_{}.xls'.format( case_name )
specimen_name = os.path.join( self.run_context.tests_safe_dir, 'merge_outputs', specimen_name )
def table_sort( row ):
barcode_num = None if row.primary1 is None else int( float( row.primary1 ) + 0.5 )
return ( barcode_num, to_str( row.lfuzzykey_1_1 ), to_str( row.lfuzzykey_1_2 ), to_str( row.rfuzzykey_1_1 ),
to_str( row.rfuzzykey_1_2 ) )
def specimen_sort( row ):
barcode_num = None if row['tmp1barcode_num'] is None else int( float( row['tmp1barcode_num'] ) + 0.5 )
return ( barcode_num, to_str( row['tmp1studentlnm'] ), to_str( row['tmp1studentfnm'] ),
to_str( row['tmp2studentlnm'] ), to_str( row['tmp2studentfnm'] ) )
return compare_tables( log_name, table_name, specimen_name,
FUZZY_COLUMNS_B, table_sort, specimen_sort, self.db_context, 0 )
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"[email protected]"
] | |
eb06707c02b708b16b20562078f0ccd02b5cca34 | 76dab6591cb9c7ee566b76a0adc7b0b0c4086592 | /main/tests/test_models.py | 7185a7137b6e46e6c02f4727e6bb80c1f7e2792a | [] | no_license | gray-adeyi/booktime | 87962321e380cfa779b24f2bd6fa8c434687d084 | fb54bc35739b28b5a71a5cf0c1067f38140559ba | refs/heads/main | 2023-04-05T02:44:01.992984 | 2021-05-03T01:37:01 | 2021-05-03T01:37:25 | 363,434,043 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,490 | py | from decimal import Decimal
from django.test import TestCase
from main import models
class TestModel(TestCase):
def test_active_manager_works(self):
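# two active products and one inactive one; the active() manager should return only the two active products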
models.Product.objects.create(
name="The cathedral and the bazaar",
price=Decimal("10.00"))
models.Product.objects.create(
name="Pride and Prejudice",
price=Decimal("2.00"))
models.Product.objects.create(
name="A Tale of Two Cities",
price=Decimal("2.00"),
active=False)
self.assertEqual(len(models.Product.objects.active()), 2)
def test_create_order_works(self):
p1 = models.Product.objects.create(
name="The cathedral and the bazaar",
price=Decimal("10.00"),
)
p2 = models.Product.objects.create(
name="Pride and Prejudice", price=Decimal("2.00")
)
user1 = models.User.objects.create_user(
"user1", "pw432joij"
)
billing = models.Address.objects.create(
user=user1,
name="John Kimball",
address1="127 Strudel road",
city="London",
country="uk",
)
shipping = models.Address.objects.create(
user=user1,
name="John Kimball",
address1="123 Deacon road",
city="London",
country="uk",
)
basket = models.Basket.objects.create(user=user1)
models.BasketLine.objects.create(
basket=basket, product=p1
)
models.BasketLine.objects.create(
basket=basket, product=p2
)
with self.assertLogs("main.models", level="INFO") as cm:
order = basket.create_order(billing, shipping)
self.assertGreaterEqual(len(cm.output), 1)
order.refresh_from_db()
self.assertEquals(order.user, user1)
self.assertEquals(
order.billing_address1, "127 Strudel road"
)
self.assertEquals(
order.shipping_address1, "123 Deacon road"
)
# add more checks here
self.assertEquals(order.lines.all().count(), 2)
lines = order.lines.all()
self.assertEquals(lines[0].product, p1)
self.assertEquals(lines[1].product, p2)
| [
"[email protected]"
] |