| code (string, 114-1.05M chars) | path (string, 3-312 chars) | quality_prob (float64, 0.5-0.99) | learning_prob (float64, 0.2-1) | filename (string, 3-168 chars) | kind (1 class: pypi) |
---|---|---|---|---|---|
from typing import Dict, List, Optional
from salvia.consensus.block_record import BlockRecord
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.blockchain_format.vdf import VDFInfo
from salvia.types.header_block import HeaderBlock
from salvia.types.weight_proof import SubEpochChallengeSegment
from salvia.util.ints import uint32
class BlockchainInterface:
def get_peak(self) -> Optional[BlockRecord]:
pass
def get_peak_height(self) -> Optional[uint32]:
pass
def block_record(self, header_hash: bytes32) -> BlockRecord:
pass
def height_to_block_record(self, height: uint32) -> BlockRecord:
pass
def get_ses_heights(self) -> List[uint32]:
pass
def get_ses(self, height: uint32) -> SubEpochSummary:
pass
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
pass
def contains_block(self, header_hash: bytes32) -> bool:
pass
def remove_block_record(self, header_hash: bytes32):
pass
def add_block_record(self, block_record: BlockRecord):
pass
def contains_height(self, height: uint32) -> bool:
pass
async def warmup(self, fork_point: uint32):
pass
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
pass
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
pass
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
pass
async def get_header_block_by_height(
self, height: int, header_hash: bytes32, tx_filter: bool = True
) -> Optional[HeaderBlock]:
pass
async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
pass
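# Convenience helper: returns the cached BlockRecord when header_hash is known, or None,
# instead of assuming the record exists the way block_record() does.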
def try_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
if self.contains_block(header_hash):
return self.block_record(header_hash)
return None
async def persist_sub_epoch_challenge_segments(
self, sub_epoch_summary_height: uint32, segments: List[SubEpochChallengeSegment]
):
pass
async def get_sub_epoch_challenge_segments(
self,
sub_epoch_summary_height: uint32,
) -> Optional[List[SubEpochChallengeSegment]]:
pass
def seen_compact_proofs(self, vdf_info: VDFInfo, height: uint32) -> bool:
pass
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/consensus/blockchain_interface.py | 0.873849 | 0.369912 | blockchain_interface.py | pypi |
from typing import Any, Callable, Dict, List, Optional
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR
from salvia.full_node.full_node import FullNode
from salvia.full_node.mempool_check_conditions import get_puzzle_and_solution_for_coin
from salvia.types.blockchain_format.program import Program, SerializedProgram
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_record import CoinRecord
from salvia.types.coin_spend import CoinSpend
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import BlockGenerator
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.types.spend_bundle import SpendBundle
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ints import uint32, uint64, uint128
from salvia.util.ws_message import WsRpcMessage, create_payload_dict
class FullNodeRpcApi:
def __init__(self, service: FullNode):
self.service = service
self.service_name = "salvia_full_node"
self.cached_blockchain_state: Optional[Dict] = None
def get_routes(self) -> Dict[str, Callable]:
return {
# Blockchain
"/get_blockchain_state": self.get_blockchain_state,
"/get_block": self.get_block,
"/get_blocks": self.get_blocks,
"/get_block_record_by_height": self.get_block_record_by_height,
"/get_block_record": self.get_block_record,
"/get_block_records": self.get_block_records,
"/get_unfinished_block_headers": self.get_unfinished_block_headers,
"/get_network_space": self.get_network_space,
"/get_additions_and_removals": self.get_additions_and_removals,
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
"/get_initial_freeze_period": self.get_initial_freeze_period,
"/get_network_info": self.get_network_info,
"/get_recent_signage_point_or_eos": self.get_recent_signage_point_or_eos,
# Coins
"/get_coin_records_by_puzzle_hash": self.get_coin_records_by_puzzle_hash,
"/get_coin_records_by_puzzle_hashes": self.get_coin_records_by_puzzle_hashes,
"/get_coin_record_by_name": self.get_coin_record_by_name,
"/get_coin_records_by_names": self.get_coin_records_by_names,
"/get_coin_records_by_parent_ids": self.get_coin_records_by_parent_ids,
"/push_tx": self.push_tx,
"/get_puzzle_and_solution": self.get_puzzle_and_solution,
# Mempool
"/get_all_mempool_tx_ids": self.get_all_mempool_tx_ids,
"/get_all_mempool_items": self.get_all_mempool_items,
"/get_mempool_item_by_tx_id": self.get_mempool_item_by_tx_id,
}
async def _state_changed(self, change: str) -> List[WsRpcMessage]:
payloads = []
if change == "new_peak" or change == "sync_mode":
data = await self.get_blockchain_state({})
assert data is not None
payloads.append(
create_payload_dict(
"get_blockchain_state",
data,
self.service_name,
"wallet_ui",
)
)
return payloads
return []
# this function is just here for backwards-compatibility. It will probably
# be removed in the future
async def get_initial_freeze_period(self, _: Dict):
# Mon May 03 2021 17:00:00 GMT+0000
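# Illustrative sanity check (not part of the original code): datetime.utcfromtimestamp(1620061200)
# evaluates to datetime(2021, 5, 3, 17, 0), matching the date in the comment above.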
return {"INITIAL_FREEZE_END_TIMESTAMP": 1620061200}
async def get_blockchain_state(self, _request: Dict):
"""
Returns a summary of the node's view of the blockchain.
"""
if self.service.initialized is False:
res: Dict = {
"blockchain_state": {
"peak": None,
"genesis_challenge_initialized": self.service.initialized,
"sync": {
"sync_mode": False,
"synced": False,
"sync_tip_height": 0,
"sync_progress_height": 0,
},
"difficulty": 0,
"sub_slot_iters": 0,
"space": 0,
"mempool_size": 0,
},
}
return res
peak: Optional[BlockRecord] = self.service.blockchain.get_peak()
if peak is not None and peak.height > 0:
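# Weight is cumulative difficulty, so subtracting the previous block's weight from the
# peak's weight yields the current per-block difficulty.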
difficulty = uint64(peak.weight - self.service.blockchain.block_record(peak.prev_hash).weight)
sub_slot_iters = peak.sub_slot_iters
else:
difficulty = self.service.constants.DIFFICULTY_STARTING
sub_slot_iters = self.service.constants.SUB_SLOT_ITERS_STARTING
sync_mode: bool = self.service.sync_store.get_sync_mode() or self.service.sync_store.get_long_sync()
sync_tip_height: Optional[uint32] = uint32(0)
if sync_mode:
if self.service.sync_store.get_sync_target_height() is not None:
sync_tip_height = self.service.sync_store.get_sync_target_height()
assert sync_tip_height is not None
if peak is not None:
sync_progress_height: uint32 = peak.height
# Don't display that we're syncing towards height 0; instead show 'Syncing height/height'
# until sync_store retrieves the correct target height.
if sync_tip_height == uint32(0):
sync_tip_height = peak.height
else:
sync_progress_height = uint32(0)
else:
sync_progress_height = uint32(0)
if peak is not None and peak.height > 1:
newer_block_hex = peak.header_hash.hex()
# Average over the last day
header_hash = self.service.blockchain.height_to_hash(uint32(max(1, peak.height - 4608)))
assert header_hash is not None
older_block_hex = header_hash.hex()
space = await self.get_network_space(
{"newer_block_header_hash": newer_block_hex, "older_block_header_hash": older_block_hex}
)
else:
space = {"space": uint128(0)}
if self.service.mempool_manager is not None:
mempool_size = len(self.service.mempool_manager.mempool.spends)
else:
mempool_size = 0
if self.service.server is not None:
is_connected = len(self.service.server.get_full_node_connections()) > 0
else:
is_connected = False
synced = await self.service.synced() and is_connected
assert space is not None
response: Dict = {
"blockchain_state": {
"peak": peak,
"genesis_challenge_initialized": self.service.initialized,
"sync": {
"sync_mode": sync_mode,
"synced": synced,
"sync_tip_height": sync_tip_height,
"sync_progress_height": sync_progress_height,
},
"difficulty": difficulty,
"sub_slot_iters": sub_slot_iters,
"space": space["space"],
"mempool_size": mempool_size,
},
}
self.cached_blockchain_state = dict(response["blockchain_state"])
return response
async def get_network_info(self, request: Dict):
network_name = self.service.config["selected_network"]
address_prefix = self.service.config["network_overrides"]["config"][network_name]["address_prefix"]
return {"network_name": network_name, "network_prefix": address_prefix}
async def get_recent_signage_point_or_eos(self, request: Dict):
if "sp_hash" not in request:
challenge_hash: bytes32 = hexstr_to_bytes(request["challenge_hash"])
# This is the case of getting an end of slot
eos_tuple = self.service.full_node_store.recent_eos.get(challenge_hash)
if not eos_tuple:
raise ValueError(f"Did not find eos {challenge_hash.hex()} in cache")
eos, time_received = eos_tuple
# If it's still in the full node store, it's not reverted
if self.service.full_node_store.get_sub_slot(eos.challenge_chain.get_hash()):
return {"eos": eos, "time_received": time_received, "reverted": False}
# Otherwise we can backtrack from peak to find it in the blockchain
curr: Optional[BlockRecord] = self.service.blockchain.get_peak()
if curr is None:
raise ValueError("No blocks in the chain")
number_of_slots_searched = 0
while number_of_slots_searched < 10:
if curr.first_in_sub_slot:
assert curr.finished_challenge_slot_hashes is not None
if curr.finished_challenge_slot_hashes[-1] == eos.challenge_chain.get_hash():
# Found this slot in the blockchain
return {"eos": eos, "time_received": time_received, "reverted": False}
number_of_slots_searched += len(curr.finished_challenge_slot_hashes)
curr = self.service.blockchain.try_block_record(curr.prev_hash)
if curr is None:
# Got to the beginning of the blockchain without finding the slot
return {"eos": eos, "time_received": time_received, "reverted": True}
# Backtracked through 10 slots but still did not find it
return {"eos": eos, "time_received": time_received, "reverted": True}
# Now we handle the case of getting a signage point
sp_hash: bytes32 = hexstr_to_bytes(request["sp_hash"])
sp_tuple = self.service.full_node_store.recent_signage_points.get(sp_hash)
if sp_tuple is None:
raise ValueError(f"Did not find sp {sp_hash.hex()} in cache")
sp, time_received = sp_tuple
# If it's still in the full node store, it's not reverted
if self.service.full_node_store.get_signage_point(sp_hash):
return {"signage_point": sp, "time_received": time_received, "reverted": False}
# Otherwise we can backtrack from peak to find it in the blockchain
rc_challenge: bytes32 = sp.rc_vdf.challenge
next_b: Optional[BlockRecord] = None
curr_b_optional: Optional[BlockRecord] = self.service.blockchain.get_peak()
assert curr_b_optional is not None
curr_b: BlockRecord = curr_b_optional
for _ in range(200):
sp_total_iters = sp.cc_vdf.number_of_iterations + curr_b.ip_sub_slot_total_iters(self.service.constants)
if curr_b.reward_infusion_new_challenge == rc_challenge:
if next_b is None:
return {"signage_point": sp, "time_received": time_received, "reverted": False}
next_b_total_iters = next_b.ip_sub_slot_total_iters(self.service.constants) + next_b.ip_iters(
self.service.constants
)
return {
"signage_point": sp,
"time_received": time_received,
"reverted": sp_total_iters > next_b_total_iters,
}
if curr_b.finished_reward_slot_hashes is not None:
assert curr_b.finished_challenge_slot_hashes is not None
for eos_rc in curr_b.finished_challenge_slot_hashes:
if eos_rc == rc_challenge:
if next_b is None:
return {"signage_point": sp, "time_received": time_received, "reverted": False}
next_b_total_iters = next_b.ip_sub_slot_total_iters(self.service.constants) + next_b.ip_iters(
self.service.constants
)
return {
"signage_point": sp,
"time_received": time_received,
"reverted": sp_total_iters > next_b_total_iters,
}
next_b = curr_b
curr_b_optional = self.service.blockchain.try_block_record(curr_b.prev_hash)
if curr_b_optional is None:
break
curr_b = curr_b_optional
return {"signage_point": sp, "time_received": time_received, "reverted": True}
async def get_block(self, request: Dict) -> Optional[Dict]:
if "header_hash" not in request:
raise ValueError("No header_hash in request")
header_hash = hexstr_to_bytes(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None:
raise ValueError(f"Block {header_hash.hex()} not found")
return {"block": block}
async def get_blocks(self, request: Dict) -> Optional[Dict]:
if "start" not in request:
raise ValueError("No start in request")
if "end" not in request:
raise ValueError("No end in request")
exclude_hh = False
if "exclude_header_hash" in request:
exclude_hh = request["exclude_header_hash"]
start = int(request["start"])
end = int(request["end"])
block_range = []
for a in range(start, end):
block_range.append(uint32(a))
blocks: List[FullBlock] = await self.service.block_store.get_full_blocks_at(block_range)
json_blocks = []
for block in blocks:
json = block.to_json_dict()
if not exclude_hh:
json["header_hash"] = block.header_hash.hex()
json_blocks.append(json)
return {"blocks": json_blocks}
async def get_block_records(self, request: Dict) -> Optional[Dict]:
if "start" not in request:
raise ValueError("No start in request")
if "end" not in request:
raise ValueError("No end in request")
start = int(request["start"])
end = int(request["end"])
records = []
peak_height = self.service.blockchain.get_peak_height()
if peak_height is None:
raise ValueError("Peak is None")
for a in range(start, end):
if peak_height < uint32(a):
self.service.log.warning("Requested block is higher than the known peak")
break
header_hash: bytes32 = self.service.blockchain.height_to_hash(uint32(a))
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash.hex()} does not exist")
records.append(record)
return {"block_records": records}
async def get_block_record_by_height(self, request: Dict) -> Optional[Dict]:
if "height" not in request:
raise ValueError("No height in request")
height = request["height"]
header_height = uint32(int(height))
peak_height = self.service.blockchain.get_peak_height()
if peak_height is None or header_height > peak_height:
raise ValueError(f"Block height {height} not found in chain")
header_hash: Optional[bytes32] = self.service.blockchain.height_to_hash(header_height)
if header_hash is None:
raise ValueError(f"Block hash {height} not found in chain")
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash} does not exist")
return {"block_record": record}
async def get_block_record(self, request: Dict):
if "header_hash" not in request:
raise ValueError("header_hash not in request")
header_hash_str = request["header_hash"]
header_hash = hexstr_to_bytes(header_hash_str)
record: Optional[BlockRecord] = self.service.blockchain.try_block_record(header_hash)
if record is None:
# Fetch from DB
record = await self.service.blockchain.block_store.get_block_record(header_hash)
if record is None:
raise ValueError(f"Block {header_hash.hex()} does not exist")
return {"block_record": record}
async def get_unfinished_block_headers(self, request: Dict) -> Optional[Dict]:
peak: Optional[BlockRecord] = self.service.blockchain.get_peak()
if peak is None:
return {"headers": []}
response_headers: List[UnfinishedHeaderBlock] = []
for ub_height, block, _ in (self.service.full_node_store.get_unfinished_blocks()).values():
if ub_height == peak.height:
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
b"",
)
response_headers.append(unfinished_header_block)
return {"headers": response_headers}
async def get_network_space(self, request: Dict) -> Optional[Dict]:
"""
Retrieves an estimate of the total network space validating the chain
between two block header hashes.
"""
if "newer_block_header_hash" not in request or "older_block_header_hash" not in request:
raise ValueError("Invalid request. newer_block_header_hash and older_block_header_hash required")
newer_block_hex = request["newer_block_header_hash"]
older_block_hex = request["older_block_header_hash"]
if newer_block_hex == older_block_hex:
raise ValueError("New and old must not be the same")
newer_block_bytes = hexstr_to_bytes(newer_block_hex)
older_block_bytes = hexstr_to_bytes(older_block_hex)
newer_block = await self.service.block_store.get_block_record(newer_block_bytes)
if newer_block is None:
raise ValueError("Newer block not found")
older_block = await self.service.block_store.get_block_record(older_block_bytes)
if older_block is None:
raise ValueError("Newer block not found")
delta_weight = newer_block.weight - older_block.weight
delta_iters = newer_block.total_iters - older_block.total_iters
weight_div_iters = delta_weight / delta_iters
additional_difficulty_constant = self.service.constants.DIFFICULTY_CONSTANT_FACTOR
eligible_plots_filter_multiplier = 2 ** self.service.constants.NUMBER_ZERO_BITS_PLOT_FILTER
network_space_bytes_estimate = (
UI_ACTUAL_SPACE_CONSTANT_FACTOR
* weight_div_iters
* additional_difficulty_constant
* eligible_plots_filter_multiplier
)
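# Note on the formula: delta_weight / delta_iters is the difficulty accumulated per VDF
# iteration over the window; multiplying by DIFFICULTY_CONSTANT_FACTOR, by
# 2 ** NUMBER_ZERO_BITS_PLOT_FILTER (compensating for the plot filter, which leaves only
# 1 in 2 ** NUMBER_ZERO_BITS_PLOT_FILTER plots eligible per challenge), and by
# UI_ACTUAL_SPACE_CONSTANT_FACTOR rescales that ratio into an estimated number of bytes.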
return {"space": uint128(int(network_space_bytes_estimate))}
async def get_coin_records_by_puzzle_hash(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for a given puzzle hash; by default only unspent coins are returned.
"""
if "puzzle_hash" not in request:
raise ValueError("Puzzle hash not in request")
kwargs: Dict[str, Any] = {"include_spent_coins": False, "puzzle_hash": hexstr_to_bytes(request["puzzle_hash"])}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hash(**kwargs)
return {"coin_records": coin_records}
async def get_coin_records_by_puzzle_hashes(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for the given puzzle hashes; by default only unspent coins are returned.
"""
if "puzzle_hashes" not in request:
raise ValueError("Puzzle hashes not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"puzzle_hashes": [hexstr_to_bytes(ph) for ph in request["puzzle_hashes"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_puzzle_hashes(**kwargs)
return {"coin_records": coin_records}
async def get_coin_record_by_name(self, request: Dict) -> Optional[Dict]:
"""
Retrieves a coin record by its name.
"""
if "name" not in request:
raise ValueError("Name not in request")
name = hexstr_to_bytes(request["name"])
coin_record: Optional[CoinRecord] = await self.service.blockchain.coin_store.get_coin_record(name)
if coin_record is None:
raise ValueError(f"Coin record 0x{name.hex()} not found")
return {"coin_record": coin_record}
async def get_coin_records_by_names(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for the given coin IDs; by default only unspent coins are returned.
"""
if "names" not in request:
raise ValueError("Names not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"names": [hexstr_to_bytes(name) for name in request["names"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_names(**kwargs)
return {"coin_records": coin_records}
async def get_coin_records_by_parent_ids(self, request: Dict) -> Optional[Dict]:
"""
Retrieves the coins for the given parent coin IDs; by default only unspent coins are returned.
"""
if "parent_ids" not in request:
raise ValueError("Parent IDs not in request")
kwargs: Dict[str, Any] = {
"include_spent_coins": False,
"parent_ids": [hexstr_to_bytes(ph) for ph in request["parent_ids"]],
}
if "start_height" in request:
kwargs["start_height"] = uint32(request["start_height"])
if "end_height" in request:
kwargs["end_height"] = uint32(request["end_height"])
if "include_spent_coins" in request:
kwargs["include_spent_coins"] = request["include_spent_coins"]
coin_records = await self.service.blockchain.coin_store.get_coin_records_by_parent_ids(**kwargs)
return {"coin_records": coin_records}
async def push_tx(self, request: Dict) -> Optional[Dict]:
if "spend_bundle" not in request:
raise ValueError("Spend bundle not in request")
spend_bundle = SpendBundle.from_json_dict(request["spend_bundle"])
spend_name = spend_bundle.name()
if self.service.mempool_manager.get_spendbundle(spend_name) is not None:
status = MempoolInclusionStatus.SUCCESS
error = None
else:
status, error = await self.service.respond_transaction(spend_bundle, spend_name)
if status != MempoolInclusionStatus.SUCCESS:
if self.service.mempool_manager.get_spendbundle(spend_name) is not None:
# Already in mempool
status = MempoolInclusionStatus.SUCCESS
error = None
if status == MempoolInclusionStatus.FAILED:
assert error is not None
raise ValueError(f"Failed to include transaction {spend_name}, error {error.name}")
return {
"status": status.name,
}
async def get_puzzle_and_solution(self, request: Dict) -> Optional[Dict]:
coin_name: bytes32 = hexstr_to_bytes(request["coin_id"])
height = request["height"]
coin_record = await self.service.coin_store.get_coin_record(coin_name)
if coin_record is None or not coin_record.spent or coin_record.spent_block_index != height:
raise ValueError(f"Invalid height {height}. coin record {coin_record}")
header_hash = self.service.blockchain.height_to_hash(height)
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None or block.transactions_generator is None:
raise ValueError("Invalid block or block generator")
block_generator: Optional[BlockGenerator] = await self.service.blockchain.get_block_generator(block)
assert block_generator is not None
error, puzzle, solution = get_puzzle_and_solution_for_coin(
block_generator, coin_name, self.service.constants.MAX_BLOCK_COST_CLVM
)
if error is not None:
raise ValueError(f"Error: {error}")
puzzle_ser: SerializedProgram = SerializedProgram.from_program(Program.to(puzzle))
solution_ser: SerializedProgram = SerializedProgram.from_program(Program.to(solution))
return {"coin_solution": CoinSpend(coin_record.coin, puzzle_ser, solution_ser)}
async def get_additions_and_removals(self, request: Dict) -> Optional[Dict]:
if "header_hash" not in request:
raise ValueError("No header_hash in request")
header_hash = hexstr_to_bytes(request["header_hash"])
block: Optional[FullBlock] = await self.service.block_store.get_full_block(header_hash)
if block is None:
raise ValueError(f"Block {header_hash.hex()} not found")
async with self.service._blockchain_lock_low_priority:
if self.service.blockchain.height_to_hash(block.height) != header_hash:
raise ValueError(f"Block at {header_hash.hex()} is no longer in the blockchain (it's in a fork)")
additions: List[CoinRecord] = await self.service.coin_store.get_coins_added_at_height(block.height)
removals: List[CoinRecord] = await self.service.coin_store.get_coins_removed_at_height(block.height)
return {"additions": additions, "removals": removals}
async def get_all_mempool_tx_ids(self, request: Dict) -> Optional[Dict]:
ids = list(self.service.mempool_manager.mempool.spends.keys())
return {"tx_ids": ids}
async def get_all_mempool_items(self, request: Dict) -> Optional[Dict]:
spends = {}
for tx_id, item in self.service.mempool_manager.mempool.spends.items():
spends[tx_id.hex()] = item
return {"mempool_items": spends}
async def get_mempool_item_by_tx_id(self, request: Dict) -> Optional[Dict]:
if "tx_id" not in request:
raise ValueError("No tx_id in request")
tx_id: bytes32 = hexstr_to_bytes(request["tx_id"])
item = self.service.mempool_manager.get_mempool_item(tx_id)
if item is None:
raise ValueError(f"Tx id 0x{tx_id.hex()} not in the mempool")
return {"mempool_item": item}
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/full_node_rpc_api.py | 0.864139 | 0.265684 | full_node_rpc_api.py | pypi |
from typing import Dict, List, Optional, Tuple, Any
from salvia.consensus.block_record import BlockRecord
from salvia.full_node.signage_point import SignagePoint
from salvia.rpc.rpc_client import RpcClient
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_record import CoinRecord
from salvia.types.coin_spend import CoinSpend
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.full_block import FullBlock
from salvia.types.spend_bundle import SpendBundle
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ints import uint32, uint64
class FullNodeRpcClient(RpcClient):
"""
Client to Salvia RPC, connects to a local full node. Uses HTTP/JSON, and converts back from
JSON into native python objects before returning. All api calls use POST requests.
Note that this is not the same as the peer protocol, or wallet protocol (which run Salvia's
protocol on top of TCP); it's a separate protocol on top of HTTP that provides easy access
to the full node.
"""
async def get_blockchain_state(self) -> Dict:
response = await self.fetch("get_blockchain_state", {})
if response["blockchain_state"]["peak"] is not None:
response["blockchain_state"]["peak"] = BlockRecord.from_json_dict(response["blockchain_state"]["peak"])
return response["blockchain_state"]
async def get_block(self, header_hash) -> Optional[FullBlock]:
try:
response = await self.fetch("get_block", {"header_hash": header_hash.hex()})
except Exception:
return None
return FullBlock.from_json_dict(response["block"])
async def get_block_record_by_height(self, height) -> Optional[BlockRecord]:
try:
response = await self.fetch("get_block_record_by_height", {"height": height})
except Exception:
return None
return BlockRecord.from_json_dict(response["block_record"])
async def get_block_record(self, header_hash) -> Optional[BlockRecord]:
try:
response = await self.fetch("get_block_record", {"header_hash": header_hash.hex()})
if response["block_record"] is None:
return None
except Exception:
return None
return BlockRecord.from_json_dict(response["block_record"])
async def get_unfinished_block_headers(self) -> List[UnfinishedHeaderBlock]:
response = await self.fetch("get_unfinished_block_headers", {})
return [UnfinishedHeaderBlock.from_json_dict(r) for r in response["headers"]]
async def get_all_block(self, start: uint32, end: uint32) -> List[FullBlock]:
response = await self.fetch("get_blocks", {"start": start, "end": end, "exclude_header_hash": True})
return [FullBlock.from_json_dict(r) for r in response["blocks"]]
async def get_network_space(
self, newer_block_header_hash: bytes32, older_block_header_hash: bytes32
) -> Optional[uint64]:
try:
network_space_bytes_estimate = await self.fetch(
"get_network_space",
{
"newer_block_header_hash": newer_block_header_hash.hex(),
"older_block_header_hash": older_block_header_hash.hex(),
},
)
except Exception:
return None
return network_space_bytes_estimate["space"]
async def get_coin_record_by_name(self, coin_id: bytes32) -> Optional[CoinRecord]:
try:
response = await self.fetch("get_coin_record_by_name", {"name": coin_id.hex()})
except Exception:
return None
return CoinRecord.from_json_dict(response["coin_record"])
async def get_coin_records_by_names(
self,
names: List[bytes32],
include_spent_coins: bool = True,
start_height: Optional[int] = None,
end_height: Optional[int] = None,
) -> List:
names_hex = [name.hex() for name in names]
d = {"names": names_hex, "include_spent_coins": include_spent_coins}
if start_height is not None:
d["start_height"] = start_height
if end_height is not None:
d["end_height"] = end_height
return [
CoinRecord.from_json_dict(coin)
for coin in (await self.fetch("get_coin_records_by_names", d))["coin_records"]
]
async def get_coin_records_by_puzzle_hash(
self,
puzzle_hash: bytes32,
include_spent_coins: bool = True,
start_height: Optional[int] = None,
end_height: Optional[int] = None,
) -> List:
d = {"puzzle_hash": puzzle_hash.hex(), "include_spent_coins": include_spent_coins}
if start_height is not None:
d["start_height"] = start_height
if end_height is not None:
d["end_height"] = end_height
return [
CoinRecord.from_json_dict(coin)
for coin in (await self.fetch("get_coin_records_by_puzzle_hash", d))["coin_records"]
]
async def get_coin_records_by_puzzle_hashes(
self,
puzzle_hashes: List[bytes32],
include_spent_coins: bool = True,
start_height: Optional[int] = None,
end_height: Optional[int] = None,
) -> List:
puzzle_hashes_hex = [ph.hex() for ph in puzzle_hashes]
d = {"puzzle_hashes": puzzle_hashes_hex, "include_spent_coins": include_spent_coins}
if start_height is not None:
d["start_height"] = start_height
if end_height is not None:
d["end_height"] = end_height
return [
CoinRecord.from_json_dict(coin)
for coin in (await self.fetch("get_coin_records_by_puzzle_hashes", d))["coin_records"]
]
async def get_coin_records_by_parent_ids(
self,
parent_ids: List[bytes32],
include_spent_coins: bool = True,
start_height: Optional[int] = None,
end_height: Optional[int] = None,
) -> List:
parent_ids_hex = [pid.hex() for pid in parent_ids]
d = {"parent_ids": parent_ids_hex, "include_spent_coins": include_spent_coins}
if start_height is not None:
d["start_height"] = start_height
if end_height is not None:
d["end_height"] = end_height
return [
CoinRecord.from_json_dict(coin)
for coin in (await self.fetch("get_coin_records_by_parent_ids", d))["coin_records"]
]
async def get_additions_and_removals(self, header_hash: bytes32) -> Tuple[List[CoinRecord], List[CoinRecord]]:
try:
response = await self.fetch("get_additions_and_removals", {"header_hash": header_hash.hex()})
except Exception:
return [], []
removals = []
additions = []
for coin_record in response["removals"]:
removals.append(CoinRecord.from_json_dict(coin_record))
for coin_record in response["additions"]:
additions.append(CoinRecord.from_json_dict(coin_record))
return additions, removals
async def get_block_records(self, start: int, end: int) -> List:
try:
response = await self.fetch("get_block_records", {"start": start, "end": end})
if response["block_records"] is None:
return []
except Exception:
return []
# TODO: return block records
return response["block_records"]
async def push_tx(self, spend_bundle: SpendBundle):
return await self.fetch("push_tx", {"spend_bundle": spend_bundle.to_json_dict()})
async def get_puzzle_and_solution(self, coin_id: bytes32, height: uint32) -> Optional[CoinSpend]:
try:
response = await self.fetch("get_puzzle_and_solution", {"coin_id": coin_id.hex(), "height": height})
return CoinSpend.from_json_dict(response["coin_solution"])
except Exception:
return None
async def get_all_mempool_tx_ids(self) -> List[bytes32]:
response = await self.fetch("get_all_mempool_tx_ids", {})
return [bytes32(hexstr_to_bytes(tx_id_hex)) for tx_id_hex in response["tx_ids"]]
async def get_all_mempool_items(self) -> Dict[bytes32, Dict]:
response: Dict = await self.fetch("get_all_mempool_items", {})
converted: Dict[bytes32, Dict] = {}
for tx_id_hex, item in response["mempool_items"].items():
converted[bytes32(hexstr_to_bytes(tx_id_hex))] = item
return converted
async def get_mempool_item_by_tx_id(self, tx_id: bytes32) -> Optional[Dict]:
try:
response = await self.fetch("get_mempool_item_by_tx_id", {"tx_id": tx_id.hex()})
return response["mempool_item"]
except Exception:
return None
async def get_recent_signage_point_or_eos(
self, sp_hash: Optional[bytes32], challenge_hash: Optional[bytes32]
) -> Optional[Any]:
try:
if sp_hash is not None:
assert challenge_hash is None
response = await self.fetch("get_recent_signage_point_or_eos", {"sp_hash": sp_hash.hex()})
return {
"signage_point": SignagePoint.from_json_dict(response["signage_point"]),
"time_received": response["time_received"],
"reverted": response["reverted"],
}
else:
assert challenge_hash is not None
response = await self.fetch("get_recent_signage_point_or_eos", {"challenge_hash": challenge_hash.hex()})
return {
"eos": EndOfSubSlotBundle.from_json_dict(response["eos"]),
"time_received": response["time_received"],
"reverted": response["reverted"],
}
except Exception:
return None
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/full_node_rpc_client.py | 0.865707 | 0.296234 | full_node_rpc_client.py | pypi |
from typing import Callable, Dict, List
from salvia.harvester.harvester import Harvester
from salvia.util.ws_message import WsRpcMessage, create_payload_dict
class HarvesterRpcApi:
def __init__(self, harvester: Harvester):
self.service = harvester
self.service_name = "salvia_harvester"
def get_routes(self) -> Dict[str, Callable]:
return {
"/get_plots": self.get_plots,
"/refresh_plots": self.refresh_plots,
"/delete_plot": self.delete_plot,
"/add_plot_directory": self.add_plot_directory,
"/get_plot_directories": self.get_plot_directories,
"/remove_plot_directory": self.remove_plot_directory,
}
async def _state_changed(self, change: str) -> List[WsRpcMessage]:
if change == "plots":
data = await self.get_plots({})
payload = create_payload_dict("get_plots", data, self.service_name, "wallet_ui")
return [payload]
return []
async def get_plots(self, request: Dict) -> Dict:
plots, failed_to_open, not_found = self.service.get_plots()
return {
"plots": plots,
"failed_to_open_filenames": failed_to_open,
"not_found_filenames": not_found,
}
async def refresh_plots(self, request: Dict) -> Dict:
self.service.plot_manager.trigger_refresh()
return {}
async def delete_plot(self, request: Dict) -> Dict:
filename = request["filename"]
if self.service.delete_plot(filename):
return {}
raise ValueError(f"Not able to delete file {filename}")
async def add_plot_directory(self, request: Dict) -> Dict:
directory_name = request["dirname"]
if await self.service.add_plot_directory(directory_name):
return {}
raise ValueError(f"Did not add plot directory {directory_name}")
async def get_plot_directories(self, request: Dict) -> Dict:
plot_dirs = await self.service.get_plot_directories()
return {"directories": plot_dirs}
async def remove_plot_directory(self, request: Dict) -> Dict:
directory_name = request["dirname"]
if await self.service.remove_plot_directory(directory_name):
return {}
raise ValueError(f"Did not remove plot directory {directory_name}")
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/harvester_rpc_api.py | 0.801703 | 0.294532 | harvester_rpc_api.py | pypi |
from typing import Callable, Dict, List, Optional
from salvia.farmer.farmer import Farmer
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ws_message import WsRpcMessage, create_payload_dict
class FarmerRpcApi:
def __init__(self, farmer: Farmer):
self.service = farmer
self.service_name = "salvia_farmer"
def get_routes(self) -> Dict[str, Callable]:
return {
"/get_signage_point": self.get_signage_point,
"/get_signage_points": self.get_signage_points,
"/get_reward_targets": self.get_reward_targets,
"/set_reward_targets": self.set_reward_targets,
"/get_pool_state": self.get_pool_state,
"/set_payout_instructions": self.set_payout_instructions,
"/get_harvesters": self.get_harvesters,
"/get_pool_login_link": self.get_pool_login_link,
}
async def _state_changed(self, change: str, change_data: Dict) -> List[WsRpcMessage]:
if change == "new_signage_point":
sp_hash = change_data["sp_hash"]
data = await self.get_signage_point({"sp_hash": sp_hash.hex()})
return [
create_payload_dict(
"new_signage_point",
data,
self.service_name,
"wallet_ui",
)
]
elif change == "new_farming_info":
return [
create_payload_dict(
"new_farming_info",
change_data,
self.service_name,
"wallet_ui",
)
]
elif change == "new_plots":
return [
create_payload_dict(
"get_harvesters",
change_data,
self.service_name,
"wallet_ui",
)
]
return []
async def get_signage_point(self, request: Dict) -> Dict:
sp_hash = hexstr_to_bytes(request["sp_hash"])
for _, sps in self.service.sps.items():
for sp in sps:
if sp.challenge_chain_sp == sp_hash:
pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
return {
"signage_point": {
"challenge_hash": sp.challenge_hash,
"challenge_chain_sp": sp.challenge_chain_sp,
"reward_chain_sp": sp.reward_chain_sp,
"difficulty": sp.difficulty,
"sub_slot_iters": sp.sub_slot_iters,
"signage_point_index": sp.signage_point_index,
},
"proofs": pospaces,
}
raise ValueError(f"Signage point {sp_hash.hex()} not found")
async def get_signage_points(self, _: Dict) -> Dict:
result: List = []
for _, sps in self.service.sps.items():
for sp in sps:
pospaces = self.service.proofs_of_space.get(sp.challenge_chain_sp, [])
result.append(
{
"signage_point": {
"challenge_hash": sp.challenge_hash,
"challenge_chain_sp": sp.challenge_chain_sp,
"reward_chain_sp": sp.reward_chain_sp,
"difficulty": sp.difficulty,
"sub_slot_iters": sp.sub_slot_iters,
"signage_point_index": sp.signage_point_index,
},
"proofs": pospaces,
}
)
return {"signage_points": result}
async def get_reward_targets(self, request: Dict) -> Dict:
search_for_private_key = request["search_for_private_key"]
return await self.service.get_reward_targets(search_for_private_key)
async def set_reward_targets(self, request: Dict) -> Dict:
farmer_target, pool_target = None, None
if "farmer_target" in request:
farmer_target = request["farmer_target"]
if "pool_target" in request:
pool_target = request["pool_target"]
self.service.set_reward_targets(farmer_target, pool_target)
return {}
async def get_pool_state(self, _: Dict) -> Dict:
pools_list = []
for p2_singleton_puzzle_hash, pool_dict in self.service.pool_state.items():
pool_state = pool_dict.copy()
pool_state["p2_singleton_puzzle_hash"] = p2_singleton_puzzle_hash.hex()
pools_list.append(pool_state)
return {"pool_state": pools_list}
async def set_payout_instructions(self, request: Dict) -> Dict:
launcher_id: bytes32 = hexstr_to_bytes(request["launcher_id"])
await self.service.set_payout_instructions(launcher_id, request["payout_instructions"])
return {}
async def get_harvesters(self, _: Dict):
return await self.service.get_harvesters()
async def get_pool_login_link(self, request: Dict) -> Dict:
launcher_id: bytes32 = bytes32(hexstr_to_bytes(request["launcher_id"]))
login_link: Optional[str] = await self.service.generate_login_link(launcher_id)
if login_link is None:
raise ValueError(f"Failed to generate login link for {launcher_id.hex()}")
return {"login_link": login_link}
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/farmer_rpc_api.py | 0.803212 | 0.23243 | farmer_rpc_api.py | pypi |
from pathlib import Path
from typing import Dict, List, Optional, Any, Tuple
from salvia.pools.pool_wallet_info import PoolWalletInfo
from salvia.rpc.rpc_client import RpcClient
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.bech32m import decode_puzzle_hash
from salvia.util.ints import uint32, uint64
from salvia.wallet.transaction_record import TransactionRecord
class WalletRpcClient(RpcClient):
"""
Client to Salvia RPC, connects to a local wallet. Uses HTTP/JSON, and converts back from
JSON into native python objects before returning. All api calls use POST requests.
Note that this is not the same as the peer protocol, or wallet protocol (which run Salvia's
protocol on top of TCP); it's a separate protocol on top of HTTP that provides easy access
to the wallet.
"""
# Key Management APIs
async def log_in(self, fingerprint: int) -> Dict:
try:
return await self.fetch(
"log_in",
{"host": "https://backup.salvianetwork.net", "fingerprint": fingerprint, "type": "start"},
)
except ValueError as e:
return e.args[0]
async def log_in_and_restore(self, fingerprint: int, file_path) -> Dict:
try:
return await self.fetch(
"log_in",
{
"host": "https://backup.salvianetwork.net",
"fingerprint": fingerprint,
"type": "restore_backup",
"file_path": file_path,
},
)
except ValueError as e:
return e.args[0]
async def log_in_and_skip(self, fingerprint: int) -> Dict:
try:
return await self.fetch(
"log_in",
{"host": "https://backup.salvianetwork.net", "fingerprint": fingerprint, "type": "skip"},
)
except ValueError as e:
return e.args[0]
async def get_public_keys(self) -> List[int]:
return (await self.fetch("get_public_keys", {}))["public_key_fingerprints"]
async def get_private_key(self, fingerprint: int) -> Dict:
return (await self.fetch("get_private_key", {"fingerprint": fingerprint}))["private_key"]
async def generate_mnemonic(self) -> List[str]:
return (await self.fetch("generate_mnemonic", {}))["mnemonic"]
async def add_key(self, mnemonic: List[str], request_type: str = "new_wallet") -> None:
return await self.fetch("add_key", {"mnemonic": mnemonic, "type": request_type})
async def delete_key(self, fingerprint: int) -> None:
return await self.fetch("delete_key", {"fingerprint": fingerprint})
async def check_delete_key(self, fingerprint: int) -> None:
return await self.fetch("check_delete_key", {"fingerprint": fingerprint})
async def delete_all_keys(self) -> None:
return await self.fetch("delete_all_keys", {})
# Wallet Node APIs
async def get_sync_status(self) -> bool:
return (await self.fetch("get_sync_status", {}))["syncing"]
async def get_synced(self) -> bool:
return (await self.fetch("get_sync_status", {}))["synced"]
async def get_height_info(self) -> uint32:
return (await self.fetch("get_height_info", {}))["height"]
async def farm_block(self, address: str) -> None:
return await self.fetch("farm_block", {"address": address})
# Wallet Management APIs
async def get_wallets(self) -> Dict:
return (await self.fetch("get_wallets", {}))["wallets"]
# Wallet APIs
async def get_wallet_balance(self, wallet_id: str) -> Dict:
return (await self.fetch("get_wallet_balance", {"wallet_id": wallet_id}))["wallet_balance"]
async def get_transaction(self, wallet_id: str, transaction_id: bytes32) -> TransactionRecord:
res = await self.fetch(
"get_transaction",
{"walled_id": wallet_id, "transaction_id": transaction_id.hex()},
)
return TransactionRecord.from_json_dict(res["transaction"])
async def get_transactions(
self,
wallet_id: str,
) -> List[TransactionRecord]:
res = await self.fetch(
"get_transactions",
{"wallet_id": wallet_id},
)
reverted_tx: List[TransactionRecord] = []
for modified_tx in res["transactions"]:
# Server returns address instead of ph, but TransactionRecord requires ph
modified_tx["to_puzzle_hash"] = decode_puzzle_hash(modified_tx["to_address"]).hex()
del modified_tx["to_address"]
reverted_tx.append(TransactionRecord.from_json_dict(modified_tx))
return reverted_tx
async def get_next_address(self, wallet_id: str, new_address: bool) -> str:
return (await self.fetch("get_next_address", {"wallet_id": wallet_id, "new_address": new_address}))["address"]
async def send_transaction(
self, wallet_id: str, amount: uint64, address: str, fee: uint64 = uint64(0)
) -> TransactionRecord:
res = await self.fetch(
"send_transaction",
{"wallet_id": wallet_id, "amount": amount, "address": address, "fee": fee},
)
return TransactionRecord.from_json_dict(res["transaction"])
async def send_transaction_multi(
self, wallet_id: str, additions: List[Dict], coins: Optional[List[Coin]] = None, fee: uint64 = uint64(0)
) -> TransactionRecord:
# Converts bytes to hex for puzzle hashes
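# Illustrative shape of `additions` (inferred from the line below; the values are placeholders):
#     [{"amount": uint64(1000), "puzzle_hash": bytes32(b"\x00" * 32)}]
# Amounts pass through unchanged; puzzle hashes are hex-encoded for the JSON request.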
additions_hex = [{"amount": ad["amount"], "puzzle_hash": ad["puzzle_hash"].hex()} for ad in additions]
if coins is not None and len(coins) > 0:
coins_json = [c.to_json_dict() for c in coins]
response: Dict = await self.fetch(
"send_transaction_multi",
{"wallet_id": wallet_id, "additions": additions_hex, "coins": coins_json, "fee": fee},
)
else:
response = await self.fetch(
"send_transaction_multi", {"wallet_id": wallet_id, "additions": additions_hex, "fee": fee}
)
return TransactionRecord.from_json_dict(response["transaction"])
async def delete_unconfirmed_transactions(self, wallet_id: str) -> None:
await self.fetch(
"delete_unconfirmed_transactions",
{"wallet_id": wallet_id},
)
return None
async def create_backup(self, file_path: Path) -> None:
return await self.fetch("create_backup", {"file_path": str(file_path.resolve())})
async def get_farmed_amount(self) -> Dict:
return await self.fetch("get_farmed_amount", {})
async def create_signed_transaction(
self, additions: List[Dict], coins: Optional[List[Coin]] = None, fee: uint64 = uint64(0)
) -> TransactionRecord:
# Converts bytes to hex for puzzle hashes
additions_hex = [{"amount": ad["amount"], "puzzle_hash": ad["puzzle_hash"].hex()} for ad in additions]
if coins is not None and len(coins) > 0:
coins_json = [c.to_json_dict() for c in coins]
response: Dict = await self.fetch(
"create_signed_transaction", {"additions": additions_hex, "coins": coins_json, "fee": fee}
)
else:
response = await self.fetch("create_signed_transaction", {"additions": additions_hex, "fee": fee})
return TransactionRecord.from_json_dict(response["signed_tx"])
async def create_new_did_wallet(self, amount):
request: Dict[str, Any] = {
"wallet_type": "did_wallet",
"did_type": "new",
"backup_dids": [],
"num_of_backup_ids_needed": 0,
"amount": amount,
"host": f"{self.hostname}:{self.port}",
}
response = await self.fetch("create_new_wallet", request)
return response
async def create_new_did_wallet_from_recovery(self, filename):
request: Dict[str, Any] = {
"wallet_type": "did_wallet",
"did_type": "recovery",
"filename": filename,
"host": f"{self.hostname}:{self.port}",
}
response = await self.fetch("create_new_wallet", request)
return response
async def did_create_attest(self, wallet_id, coin_name, pubkey, puzhash, file_name):
request: Dict[str, Any] = {
"wallet_id": wallet_id,
"coin_name": coin_name,
"pubkey": pubkey,
"puzhash": puzhash,
"filename": file_name,
}
response = await self.fetch("did_create_attest", request)
return response
async def did_recovery_spend(self, wallet_id, attest_filenames):
request: Dict[str, Any] = {
"wallet_id": wallet_id,
"attest_filenames": attest_filenames,
}
response = await self.fetch("did_recovery_spend", request)
return response
# TODO: test all invocations of create_new_pool_wallet with new fee arg.
async def create_new_pool_wallet(
self,
target_puzzlehash: Optional[bytes32],
pool_url: Optional[str],
relative_lock_height: uint32,
backup_host: str,
mode: str,
state: str,
fee: uint64,
p2_singleton_delay_time: Optional[uint64] = None,
p2_singleton_delayed_ph: Optional[bytes32] = None,
) -> TransactionRecord:
request: Dict[str, Any] = {
"wallet_type": "pool_wallet",
"mode": mode,
"host": backup_host,
"initial_target_state": {
"target_puzzle_hash": target_puzzlehash.hex() if target_puzzlehash else None,
"relative_lock_height": relative_lock_height,
"pool_url": pool_url,
"state": state,
},
"fee": fee,
}
if p2_singleton_delay_time is not None:
request["p2_singleton_delay_time"] = p2_singleton_delay_time
if p2_singleton_delayed_ph is not None:
request["p2_singleton_delayed_ph"] = p2_singleton_delayed_ph.hex()
res = await self.fetch("create_new_wallet", request)
return TransactionRecord.from_json_dict(res["transaction"])
async def pw_self_pool(self, wallet_id: str, fee: uint64) -> TransactionRecord:
return TransactionRecord.from_json_dict(
(await self.fetch("pw_self_pool", {"wallet_id": wallet_id, "fee": fee}))["transaction"]
)
async def pw_join_pool(
self, wallet_id: str, target_puzzlehash: bytes32, pool_url: str, relative_lock_height: uint32, fee: uint64
) -> TransactionRecord:
request = {
"wallet_id": int(wallet_id),
"target_puzzlehash": target_puzzlehash.hex(),
"relative_lock_height": relative_lock_height,
"pool_url": pool_url,
"fee": fee,
}
join_reply = await self.fetch("pw_join_pool", request)
return TransactionRecord.from_json_dict(join_reply["transaction"])
async def pw_absorb_rewards(self, wallet_id: str, fee: uint64 = uint64(0)) -> TransactionRecord:
return TransactionRecord.from_json_dict(
(await self.fetch("pw_absorb_rewards", {"wallet_id": wallet_id, "fee": fee}))["transaction"]
)
async def pw_status(self, wallet_id: str) -> Tuple[PoolWalletInfo, List[TransactionRecord]]:
json_dict = await self.fetch("pw_status", {"wallet_id": wallet_id})
return (
PoolWalletInfo.from_json_dict(json_dict["state"]),
[TransactionRecord.from_json_dict(tr) for tr in json_dict["unconfirmed_transactions"]],
)
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/wallet_rpc_client.py | 0.832271 | 0.29972 | wallet_rpc_client.py | pypi |
from typing import Dict, List, Optional, Any
from salvia.rpc.rpc_client import RpcClient
from salvia.types.blockchain_format.sized_bytes import bytes32
class FarmerRpcClient(RpcClient):
"""
Client to Salvia RPC, connects to a local farmer. Uses HTTP/JSON, and converts back from
JSON into native python objects before returning. All api calls use POST requests.
Note that this is not the same as the peer protocol, or wallet protocol (which run Salvia's
protocol on top of TCP); it's a separate protocol on top of HTTP that provides easy access
to the farmer.
"""
async def get_signage_point(self, sp_hash: bytes32) -> Optional[Dict]:
try:
return await self.fetch("get_signage_point", {"sp_hash": sp_hash.hex()})
except ValueError:
return None
async def get_signage_points(self) -> List[Dict]:
return (await self.fetch("get_signage_points", {}))["signage_points"]
async def get_reward_targets(self, search_for_private_key: bool) -> Dict:
response = await self.fetch("get_reward_targets", {"search_for_private_key": search_for_private_key})
return_dict = {
"farmer_target": response["farmer_target"],
"pool_target": response["pool_target"],
}
if "have_pool_sk" in response:
return_dict["have_pool_sk"] = response["have_pool_sk"]
if "have_farmer_sk" in response:
return_dict["have_farmer_sk"] = response["have_farmer_sk"]
return return_dict
async def set_reward_targets(self, farmer_target: Optional[str] = None, pool_target: Optional[str] = None) -> Dict:
request = {}
if farmer_target is not None:
request["farmer_target"] = farmer_target
if pool_target is not None:
request["pool_target"] = pool_target
return await self.fetch("set_reward_targets", request)
async def get_pool_state(self) -> Dict:
return await self.fetch("get_pool_state", {})
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str) -> Dict:
request = {"launcher_id": launcher_id.hex(), "payout_instructions": payout_instructions}
return await self.fetch("set_payout_instructions", request)
async def get_harvesters(self) -> Dict[str, Any]:
return await self.fetch("get_harvesters", {})
async def get_pool_login_link(self, launcher_id: bytes32) -> Optional[str]:
try:
return (await self.fetch("get_pool_login_link", {"launcher_id": launcher_id.hex()}))["login_link"]
except ValueError:
return None
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/rpc/farmer_rpc_client.py | 0.861771 | 0.228286 | farmer_rpc_client.py | pypi |
import asyncio
import concurrent
import logging
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
from typing import Callable, Dict, List, Optional, Tuple
import salvia.server.ws_connection as ws # lgtm [py/import-and-import-from]
from salvia.consensus.constants import ConsensusConstants
from salvia.plotting.manager import PlotManager
from salvia.plotting.util import (
add_plot_directory,
get_plot_directories,
remove_plot_directory,
remove_plot,
PlotsRefreshParameter,
PlotRefreshResult,
PlotRefreshEvents,
)
from salvia.util.streamable import dataclass_from_dict
log = logging.getLogger(__name__)
class Harvester:
plot_manager: PlotManager
root_path: Path
_is_shutdown: bool
executor: ThreadPoolExecutor
state_changed_callback: Optional[Callable]
cached_challenges: List
constants: ConsensusConstants
_refresh_lock: asyncio.Lock
event_loop: asyncio.events.AbstractEventLoop
def __init__(self, root_path: Path, config: Dict, constants: ConsensusConstants):
self.log = log
self.root_path = root_path
# TODO: remove the checks below after a few versions / some time has passed
refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter()
if "plot_loading_frequency_seconds" in config:
self.log.info(
"`harvester.plot_loading_frequency_seconds` is deprecated. Consider replacing it with the new section "
"`harvester.plots_refresh_parameter`. See `initial-config.yaml`."
)
if "plots_refresh_parameter" in config:
refresh_parameter = dataclass_from_dict(PlotsRefreshParameter, config["plots_refresh_parameter"])
self.plot_manager = PlotManager(
root_path, refresh_parameter=refresh_parameter, refresh_callback=self._plot_refresh_callback
)
self._is_shutdown = False
self.executor = concurrent.futures.ThreadPoolExecutor(max_workers=config["num_threads"])
self.state_changed_callback = None
self.server = None
self.constants = constants
self.cached_challenges = []
self.state_changed_callback: Optional[Callable] = None
self.parallel_read: bool = config.get("parallel_read", True)
async def _start(self):
self._refresh_lock = asyncio.Lock()
self.event_loop = asyncio.get_event_loop()
def _close(self):
self._is_shutdown = True
self.executor.shutdown(wait=True)
self.plot_manager.stop_refreshing()
async def _await_closed(self):
pass
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
def _state_changed(self, change: str):
if self.state_changed_callback is not None:
self.state_changed_callback(change)
def _plot_refresh_callback(self, event: PlotRefreshEvents, update_result: PlotRefreshResult):
self.log.info(
f"refresh_batch: event {event.name}, loaded {update_result.loaded}, "
f"removed {update_result.removed}, processed {update_result.processed}, "
f"remaining {update_result.remaining}, "
f"duration: {update_result.duration:.2f} seconds"
)
if update_result.loaded > 0:
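# The refresh callback fires on the PlotManager's background refresh thread, so the state
# change is handed back to the asyncio event loop thread-safely via call_soon_threadsafe.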
self.event_loop.call_soon_threadsafe(self._state_changed, "plots")
def on_disconnect(self, connection: ws.WSSalviaConnection):
self.log.info(f"peer disconnected {connection.get_peer_logging()}")
self._state_changed("close_connection")
def get_plots(self) -> Tuple[List[Dict], List[str], List[str]]:
self.log.debug(f"get_plots prover items: {self.plot_manager.plot_count()}")
response_plots: List[Dict] = []
with self.plot_manager:
for path, plot_info in self.plot_manager.plots.items():
prover = plot_info.prover
response_plots.append(
{
"filename": str(path),
"size": prover.get_size(),
"plot-seed": prover.get_id(), # Deprecated
"plot_id": prover.get_id(),
"pool_public_key": plot_info.pool_public_key,
"pool_contract_puzzle_hash": plot_info.pool_contract_puzzle_hash,
"plot_public_key": plot_info.plot_public_key,
"file_size": plot_info.file_size,
"time_modified": plot_info.time_modified,
}
)
self.log.debug(
f"get_plots response: plots: {len(response_plots)}, "
f"failed_to_open_filenames: {len(self.plot_manager.failed_to_open_filenames)}, "
f"no_key_filenames: {len(self.plot_manager.no_key_filenames)}"
)
return (
response_plots,
[str(s) for s, _ in self.plot_manager.failed_to_open_filenames.items()],
[str(s) for s in self.plot_manager.no_key_filenames],
)
def delete_plot(self, str_path: str):
remove_plot(Path(str_path))
self.plot_manager.trigger_refresh()
self._state_changed("plots")
return True
async def add_plot_directory(self, str_path: str) -> bool:
add_plot_directory(self.root_path, str_path)
self.plot_manager.trigger_refresh()
return True
async def get_plot_directories(self) -> List[str]:
return get_plot_directories(self.root_path)
async def remove_plot_directory(self, str_path: str) -> bool:
remove_plot_directory(self.root_path, str_path)
self.plot_manager.trigger_refresh()
return True
def set_server(self, server):
self.server = server
| /salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/harvester/harvester.py | 0.693992 | 0.185209 | harvester.py | pypi |
from typing import Dict, List
from sortedcontainers import SortedDict
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.mempool_item import MempoolItem
class Mempool:
def __init__(self, max_size_in_cost: int):
self.spends: Dict[bytes32, MempoolItem] = {}
self.sorted_spends: SortedDict = SortedDict()
self.additions: Dict[bytes32, MempoolItem] = {}
self.removals: Dict[bytes32, MempoolItem] = {}
self.max_size_in_cost: int = max_size_in_cost
self.total_mempool_cost: int = 0
def get_min_fee_rate(self, cost: int) -> float:
"""
        Gets the minimum fee-per-cost (fpc) rate that a transaction with the specified cost needs to get included.
"""
if self.at_full_capacity(cost):
current_cost = self.total_mempool_cost
# Iterates through all spends in increasing fee per cost
for fee_per_cost, spends_with_fpc in self.sorted_spends.items():
for spend_name, item in spends_with_fpc.items():
current_cost -= item.cost
# Removing one at a time, until our transaction of size cost fits
if current_cost + cost <= self.max_size_in_cost:
return fee_per_cost
raise ValueError(
f"Transaction with cost {cost} does not fit in mempool of max cost {self.max_size_in_cost}"
)
else:
return 0
def remove_from_pool(self, item: MempoolItem):
"""
Removes an item from the mempool.
"""
removals: List[Coin] = item.removals
additions: List[Coin] = item.additions
for rem in removals:
del self.removals[rem.name()]
for add in additions:
del self.additions[add.name()]
del self.spends[item.name]
del self.sorted_spends[item.fee_per_cost][item.name]
dic = self.sorted_spends[item.fee_per_cost]
if len(dic.values()) == 0:
del self.sorted_spends[item.fee_per_cost]
self.total_mempool_cost -= item.cost
assert self.total_mempool_cost >= 0
def add_to_pool(
self,
item: MempoolItem,
):
"""
        Adds an item to the mempool. If the new item does not fit, existing transactions are evicted in
        order of increasing fee per cost until it does.
"""
while self.at_full_capacity(item.cost):
# Val is Dict[hash, MempoolItem]
fee_per_cost, val = self.sorted_spends.peekitem(index=0)
to_remove = list(val.values())[0]
self.remove_from_pool(to_remove)
self.spends[item.name] = item
# sorted_spends is Dict[float, Dict[bytes32, MempoolItem]]
if item.fee_per_cost not in self.sorted_spends:
self.sorted_spends[item.fee_per_cost] = {}
self.sorted_spends[item.fee_per_cost][item.name] = item
for add in item.additions:
self.additions[add.name()] = item
for coin in item.removals:
self.removals[coin.name()] = item
self.total_mempool_cost += item.cost
def at_full_capacity(self, cost: int) -> bool:
"""
Checks whether the mempool is at full capacity and cannot accept a transaction with size cost.
"""
return self.total_mempool_cost + cost > self.max_size_in_cost
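# Illustrative sketch (not part of the original module): how the SortedDict keyed by fee-per-cost
# drives eviction in get_min_fee_rate/add_to_pool. Names, fees and costs below are made up;
# real entries map spend names to MempoolItems rather than bare costs.
if __name__ == "__main__":
    sorted_spends: SortedDict = SortedDict()  # fee_per_cost -> {spend name: cost}
    for name, fee, cost in [("low", 0, 100), ("mid", 50, 100), ("high", 500, 100)]:
        sorted_spends.setdefault(fee / cost, {})[name] = cost
    # peekitem(index=0) yields the lowest fee-per-cost bucket, which is the first to be evicted
    lowest_fpc, bucket = sorted_spends.peekitem(index=0)
    print(f"evicted first: fpc={lowest_fpc}, spends={list(bucket)}")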
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/mempool.py
| 0.885644 | 0.335473 |
mempool.py
|
pypi
|
import logging
import time
from typing import Dict, List, Optional
from clvm_rs import STRICT_MODE
from salvia.consensus.cost_calculator import NPCResult
from salvia.full_node.generator import create_generator_args, setup_generator_args
from salvia.types.blockchain_format.program import NIL
from salvia.types.coin_record import CoinRecord
from salvia.types.condition_with_args import ConditionWithArgs
from salvia.types.generator_types import BlockGenerator
from salvia.types.name_puzzle_condition import NPC
from salvia.util.clvm import int_from_bytes
from salvia.util.condition_tools import ConditionOpcode
from salvia.util.errors import Err
from salvia.util.ints import uint32, uint64, uint16
from salvia.wallet.puzzles.generator_loader import GENERATOR_FOR_SINGLE_COIN_MOD
from salvia.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
log = logging.getLogger(__name__)
def mempool_assert_absolute_block_height_exceeds(
condition: ConditionWithArgs, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the next block index exceeds the block index from the condition
"""
try:
block_index_exceeds_this = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < block_index_exceeds_this:
return Err.ASSERT_HEIGHT_ABSOLUTE_FAILED
return None
def mempool_assert_relative_block_height_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, prev_transaction_block_height: uint32
) -> Optional[Err]:
"""
Checks if the coin age exceeds the age from the condition
"""
try:
expected_block_age = int_from_bytes(condition.vars[0])
block_index_exceeds_this = expected_block_age + unspent.confirmed_block_index
except ValueError:
return Err.INVALID_CONDITION
if prev_transaction_block_height < block_index_exceeds_this:
return Err.ASSERT_HEIGHT_RELATIVE_FAILED
return None
def mempool_assert_absolute_time_exceeds(condition: ConditionWithArgs, timestamp: uint64) -> Optional[Err]:
"""
Check if the current time in seconds exceeds the time specified by condition
"""
try:
expected_seconds = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if timestamp is None:
timestamp = uint64(int(time.time()))
if timestamp < expected_seconds:
return Err.ASSERT_SECONDS_ABSOLUTE_FAILED
return None
def mempool_assert_relative_time_exceeds(
condition: ConditionWithArgs, unspent: CoinRecord, timestamp: uint64
) -> Optional[Err]:
"""
Check if the current time in seconds exceeds the time specified by condition
"""
try:
expected_seconds = int_from_bytes(condition.vars[0])
except ValueError:
return Err.INVALID_CONDITION
if timestamp is None:
timestamp = uint64(int(time.time()))
if timestamp < expected_seconds + unspent.timestamp:
return Err.ASSERT_SECONDS_RELATIVE_FAILED
return None
def get_name_puzzle_conditions(
generator: BlockGenerator, max_cost: int, *, cost_per_byte: int, safe_mode: bool
) -> NPCResult:
block_program, block_program_args = setup_generator_args(generator)
max_cost -= len(bytes(generator.program)) * cost_per_byte
if max_cost < 0:
return NPCResult(uint16(Err.INVALID_BLOCK_COST.value), [], uint64(0))
flags = STRICT_MODE if safe_mode else 0
try:
err, result, clvm_cost = GENERATOR_MOD.run_as_generator(max_cost, flags, block_program, block_program_args)
if err is not None:
return NPCResult(uint16(err), [], uint64(0))
else:
npc_list = []
for r in result:
conditions = []
for c in r.conditions:
cwa = []
for cond_list in c[1]:
cwa.append(ConditionWithArgs(ConditionOpcode(bytes([cond_list.opcode])), cond_list.vars))
conditions.append((ConditionOpcode(bytes([c[0]])), cwa))
npc_list.append(NPC(r.coin_name, r.puzzle_hash, conditions))
return NPCResult(None, npc_list, uint64(clvm_cost))
except BaseException as e:
log.debug(f"get_name_puzzle_condition failed: {e}")
return NPCResult(uint16(Err.GENERATOR_RUNTIME_ERROR.value), [], uint64(0))
def get_puzzle_and_solution_for_coin(generator: BlockGenerator, coin_name: bytes, max_cost: int):
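    # Runs the block generator through GENERATOR_FOR_SINGLE_COIN_MOD to extract the puzzle reveal and
    # solution for the requested coin, returning (error, puzzle, solution)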
try:
block_program = generator.program
if not generator.generator_args:
block_program_args = [NIL]
else:
block_program_args = create_generator_args(generator.generator_refs())
cost, result = GENERATOR_FOR_SINGLE_COIN_MOD.run_with_cost(
max_cost, block_program, block_program_args, coin_name
)
puzzle = result.first()
solution = result.rest().first()
return None, puzzle, solution
except Exception as e:
return e, None, None
def mempool_check_conditions_dict(
unspent: CoinRecord,
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
prev_transaction_block_height: uint32,
timestamp: uint64,
) -> Optional[Err]:
"""
Check all conditions against current state.
"""
for con_list in conditions_dict.values():
cvp: ConditionWithArgs
for cvp in con_list:
error: Optional[Err] = None
if cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_ABSOLUTE:
error = mempool_assert_absolute_block_height_exceeds(cvp, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_HEIGHT_RELATIVE:
error = mempool_assert_relative_block_height_exceeds(cvp, unspent, prev_transaction_block_height)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_ABSOLUTE:
error = mempool_assert_absolute_time_exceeds(cvp, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_SECONDS_RELATIVE:
error = mempool_assert_relative_time_exceeds(cvp, unspent, timestamp)
elif cvp.opcode is ConditionOpcode.ASSERT_MY_COIN_ID:
assert False
elif cvp.opcode is ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT:
assert False
elif cvp.opcode is ConditionOpcode.ASSERT_PUZZLE_ANNOUNCEMENT:
assert False
elif cvp.opcode is ConditionOpcode.ASSERT_MY_PARENT_ID:
assert False
elif cvp.opcode is ConditionOpcode.ASSERT_MY_PUZZLEHASH:
assert False
elif cvp.opcode is ConditionOpcode.ASSERT_MY_AMOUNT:
assert False
if error:
return error
return None
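# Illustrative sketch (not part of the original module): the arithmetic behind the four assert
# helpers above, using plain integers in place of ConditionWithArgs/CoinRecord. Numbers are made up.
if __name__ == "__main__":
    prev_transaction_block_height = 1000
    confirmed_block_index = 990  # height at which the spent coin was created
    now = 1_700_000_000
    coin_timestamp = 1_699_999_000  # timestamp of the spent coin
    # ASSERT_HEIGHT_ABSOLUTE passes once the chain has reached the required height
    assert prev_transaction_block_height >= 995
    # ASSERT_HEIGHT_RELATIVE passes once the coin is old enough (required age + creation height)
    assert prev_transaction_block_height >= 5 + confirmed_block_index
    # ASSERT_SECONDS_ABSOLUTE passes once the clock has reached the required timestamp
    assert now >= 1_699_999_500
    # ASSERT_SECONDS_RELATIVE passes once enough seconds have elapsed since the coin's timestamp
    assert now >= 600 + coin_timestamp
    print("all toy conditions satisfied")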
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/mempool_check_conditions.py
| 0.799521 | 0.376967 |
mempool_check_conditions.py
|
pypi
|
import asyncio
import logging
from typing import Dict, List, Optional, Set, Tuple
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.ints import uint32, uint128
log = logging.getLogger(__name__)
class SyncStore:
# Whether or not we are syncing
sync_mode: bool
long_sync: bool
    peak_to_peer: Dict[bytes32, Set[bytes32]]  # Header hash : set of peer node ids
peer_to_peak: Dict[bytes32, Tuple[bytes32, uint32, uint128]] # peer node id : [header_hash, height, weight]
sync_target_header_hash: Optional[bytes32] # Peak hash we are syncing towards
sync_target_height: Optional[uint32] # Peak height we are syncing towards
peers_changed: asyncio.Event
batch_syncing: Set[bytes32] # Set of nodes which we are batch syncing from
    backtrack_syncing: Dict[bytes32, int]  # Nodes we are backtrack syncing from, and how many concurrent tasks each
@classmethod
async def create(cls):
self = cls()
self.sync_mode = False
self.long_sync = False
self.sync_target_header_hash = None
self.sync_target_height = None
self.peak_fork_point = {}
self.peak_to_peer = {}
self.peer_to_peak = {}
self.peers_changed = asyncio.Event()
self.batch_syncing = set()
self.backtrack_syncing = {}
return self
def set_peak_target(self, peak_hash: bytes32, target_height: uint32):
self.sync_target_header_hash = peak_hash
self.sync_target_height = target_height
def get_sync_target_hash(self) -> Optional[bytes32]:
return self.sync_target_header_hash
def get_sync_target_height(self) -> Optional[uint32]:
return self.sync_target_height
def set_sync_mode(self, sync_mode: bool):
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
def set_long_sync(self, long_sync: bool):
self.long_sync = long_sync
def get_long_sync(self) -> bool:
return self.long_sync
def seen_header_hash(self, header_hash: bytes32) -> bool:
return header_hash in self.peak_to_peer
def peer_has_block(self, header_hash: bytes32, peer_id: bytes32, weight: uint128, height: uint32, new_peak: bool):
"""
Adds a record that a certain peer has a block.
"""
if header_hash == self.sync_target_header_hash:
self.peers_changed.set()
if header_hash in self.peak_to_peer:
self.peak_to_peer[header_hash].add(peer_id)
else:
self.peak_to_peer[header_hash] = {peer_id}
if new_peak:
self.peer_to_peak[peer_id] = (header_hash, height, weight)
def get_peers_that_have_peak(self, header_hashes: List[bytes32]) -> Set[bytes32]:
"""
Returns: peer ids of peers that have at least one of the header hashes.
"""
node_ids: Set[bytes32] = set()
for header_hash in header_hashes:
if header_hash in self.peak_to_peer:
for node_id in self.peak_to_peer[header_hash]:
node_ids.add(node_id)
return node_ids
def get_peak_of_each_peer(self) -> Dict[bytes32, Tuple[bytes32, uint32, uint128]]:
"""
Returns: dictionary of peer id to peak information.
"""
ret = {}
for peer_id, v in self.peer_to_peak.items():
if v[0] not in self.peak_to_peer:
continue
ret[peer_id] = v
return ret
def get_heaviest_peak(self) -> Optional[Tuple[bytes32, uint32, uint128]]:
"""
Returns: the header_hash, height, and weight of the heaviest block that one of our peers has notified
us of.
"""
if len(self.peer_to_peak) == 0:
return None
heaviest_peak_hash: Optional[bytes32] = None
heaviest_peak_weight: uint128 = uint128(0)
heaviest_peak_height: Optional[uint32] = None
for peer_id, (peak_hash, height, weight) in self.peer_to_peak.items():
if peak_hash not in self.peak_to_peer:
continue
if heaviest_peak_hash is None or weight > heaviest_peak_weight:
heaviest_peak_hash = peak_hash
heaviest_peak_weight = weight
heaviest_peak_height = height
assert heaviest_peak_hash is not None and heaviest_peak_weight is not None and heaviest_peak_height is not None
return heaviest_peak_hash, heaviest_peak_height, heaviest_peak_weight
async def clear_sync_info(self):
"""
Clears the peak_to_peer info which can get quite large.
"""
self.peak_to_peer = {}
def peer_disconnected(self, node_id: bytes32):
if node_id in self.peer_to_peak:
del self.peer_to_peak[node_id]
for peak, peers in self.peak_to_peer.items():
if node_id in peers:
self.peak_to_peer[peak].remove(node_id)
assert node_id not in self.peak_to_peer[peak]
self.peers_changed.set()
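# Illustrative sketch (not part of the original module): minimal SyncStore usage, assuming
# bytes32 accepts a raw 32-byte value. Hashes, heights and weights below are made up.
if __name__ == "__main__":

    async def _demo() -> None:
        store = await SyncStore.create()
        peak_hash = bytes32(b"\x11" * 32)
        peer_id = bytes32(b"\x22" * 32)
        # Record that the peer announced this peak (weight 1000 at height 42)
        store.peer_has_block(peak_hash, peer_id, uint128(1000), uint32(42), new_peak=True)
        print(store.get_heaviest_peak())  # (peak_hash, 42, 1000)
        store.peer_disconnected(peer_id)
        print(store.get_heaviest_peak())  # None, no peers left

    asyncio.run(_demo())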
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/sync_store.py
| 0.852153 | 0.352536 |
sync_store.py
|
pypi
|
import asyncio
import collections
import dataclasses
import logging
import time
from concurrent.futures.process import ProcessPoolExecutor
from typing import Dict, List, Optional, Set, Tuple
from blspy import G1Element, GTElement
from chiabip158 import PyBIP158
from salvia.util import cached_bls
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from salvia.full_node.bundle_tools import simple_solution_generator
from salvia.full_node.coin_store import CoinStore
from salvia.full_node.mempool import Mempool
from salvia.full_node.mempool_check_conditions import mempool_check_conditions_dict, get_name_puzzle_conditions
from salvia.full_node.pending_tx_cache import PendingTxCache
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import SerializedProgram
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_record import CoinRecord
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.condition_with_args import ConditionWithArgs
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.types.mempool_item import MempoolItem
from salvia.types.spend_bundle import SpendBundle
from salvia.util.cached_bls import LOCAL_CACHE
from salvia.util.clvm import int_from_bytes
from salvia.util.condition_tools import pkm_pairs
from salvia.util.errors import Err, ValidationError
from salvia.util.generator_tools import additions_for_npc
from salvia.util.ints import uint32, uint64
from salvia.util.lru_cache import LRUCache
from salvia.util.streamable import recurse_jsonify
log = logging.getLogger(__name__)
def validate_clvm_and_signature(
spend_bundle_bytes: bytes, max_cost: int, cost_per_byte: int, additional_data: bytes
) -> Tuple[Optional[Err], bytes, Dict[bytes, bytes]]:
"""
Validates CLVM and aggregate signature for a spendbundle. This is meant to be called under a ProcessPoolExecutor
    in order to validate the heavy parts of a transaction in a separate process. Returns an optional error,
    the serialized NPCResult, and a cache of the newly validated pairings (empty on error).
"""
try:
bundle: SpendBundle = SpendBundle.from_bytes(spend_bundle_bytes)
program = simple_solution_generator(bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(program, max_cost, cost_per_byte=cost_per_byte, safe_mode=True)
if result.error is not None:
return Err(result.error), b"", {}
pks: List[G1Element] = []
msgs: List[bytes32] = []
pks, msgs = pkm_pairs(result.npc_list, additional_data)
# Verify aggregated signature
cache: LRUCache = LRUCache(10000)
if not cached_bls.aggregate_verify(pks, msgs, bundle.aggregated_signature, True, cache):
return Err.BAD_AGGREGATE_SIGNATURE, b"", {}
new_cache_entries: Dict[bytes, bytes] = {}
for k, v in cache.cache.items():
new_cache_entries[k] = bytes(v)
except ValidationError as e:
return e.code, b"", {}
except Exception:
return Err.UNKNOWN, b"", {}
return None, bytes(result), new_cache_entries
class MempoolManager:
def __init__(self, coin_store: CoinStore, consensus_constants: ConsensusConstants):
self.constants: ConsensusConstants = consensus_constants
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
# Keep track of seen spend_bundles
self.seen_bundle_hashes: Dict[bytes32, bytes32] = {}
self.coin_store = coin_store
self.lock = asyncio.Lock()
# The fee per cost must be above this amount to consider the fee "nonzero", and thus able to kick out other
# transactions. This prevents spam. This is equivalent to 0.055 XSLV per block, or about 0.00005 XSLV for two
# spends.
self.nonzero_fee_minimum_fpc = 5
self.limit_factor = 0.5
self.mempool_max_total_cost = int(self.constants.MAX_BLOCK_COST_CLVM * self.constants.MEMPOOL_BLOCK_BUFFER)
        # Transactions that could not enter the mempool yet (e.g. height/time locks or conflicts), kept for retry
self.potential_cache = PendingTxCache(self.constants.MAX_BLOCK_COST_CLVM * 5)
self.seen_cache_size = 10000
self.pool = ProcessPoolExecutor(max_workers=2)
# The mempool will correspond to a certain peak
self.peak: Optional[BlockRecord] = None
self.mempool: Mempool = Mempool(self.mempool_max_total_cost)
def shut_down(self):
self.pool.shutdown(wait=True)
async def create_bundle_from_mempool(
self, last_tb_header_hash: bytes32
) -> Optional[Tuple[SpendBundle, List[Coin], List[Coin]]]:
"""
Returns aggregated spendbundle that can be used for creating new block,
additions and removals in that spend_bundle
"""
if self.peak is None or self.peak.header_hash != last_tb_header_hash:
return None
cost_sum = 0 # Checks that total cost does not exceed block maximum
fee_sum = 0 # Checks that total fees don't exceed 64 bits
spend_bundles: List[SpendBundle] = []
removals = []
additions = []
broke_from_inner_loop = False
log.info(f"Starting to make block, max cost: {self.constants.MAX_BLOCK_COST_CLVM}")
for dic in reversed(self.mempool.sorted_spends.values()):
if broke_from_inner_loop:
break
for item in dic.values():
log.info(f"Cumulative cost: {cost_sum}, fee per cost: {item.fee / item.cost}")
if (
item.cost + cost_sum <= self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM
and item.fee + fee_sum <= self.constants.MAX_COIN_AMOUNT
):
spend_bundles.append(item.spend_bundle)
cost_sum += item.cost
fee_sum += item.fee
removals.extend(item.removals)
additions.extend(item.additions)
else:
broke_from_inner_loop = True
break
if len(spend_bundles) > 0:
log.info(
f"Cumulative cost of block (real cost should be less) {cost_sum}. Proportion "
f"full: {cost_sum / self.constants.MAX_BLOCK_COST_CLVM}"
)
agg = SpendBundle.aggregate(spend_bundles)
return agg, additions, removals
else:
return None
def get_filter(self) -> bytes:
all_transactions: Set[bytes32] = set()
byte_array_list = []
for key, _ in self.mempool.spends.items():
if key not in all_transactions:
all_transactions.add(key)
byte_array_list.append(bytearray(key))
tx_filter: PyBIP158 = PyBIP158(byte_array_list)
return bytes(tx_filter.GetEncoded())
def is_fee_enough(self, fees: uint64, cost: uint64) -> bool:
"""
Determines whether any of the pools can accept a transaction with a given fees
and cost.
"""
if cost == 0:
return False
fees_per_cost = fees / cost
if not self.mempool.at_full_capacity(cost) or (
fees_per_cost >= self.nonzero_fee_minimum_fpc and fees_per_cost > self.mempool.get_min_fee_rate(cost)
):
return True
return False
def add_and_maybe_pop_seen(self, spend_name: bytes32):
self.seen_bundle_hashes[spend_name] = spend_name
while len(self.seen_bundle_hashes) > self.seen_cache_size:
first_in = list(self.seen_bundle_hashes.keys())[0]
self.seen_bundle_hashes.pop(first_in)
def seen(self, bundle_hash: bytes32) -> bool:
"""Return true if we saw this spendbundle recently"""
return bundle_hash in self.seen_bundle_hashes
def remove_seen(self, bundle_hash: bytes32):
if bundle_hash in self.seen_bundle_hashes:
self.seen_bundle_hashes.pop(bundle_hash)
@staticmethod
def get_min_fee_increase() -> int:
# 0.00001 XSLV
return 10000000
def can_replace(
self,
conflicting_items: Dict[bytes32, MempoolItem],
removals: Dict[bytes32, CoinRecord],
fees: uint64,
fees_per_cost: float,
) -> bool:
conflicting_fees = 0
conflicting_cost = 0
for item in conflicting_items.values():
conflicting_fees += item.fee
conflicting_cost += item.cost
# All coins spent in all conflicting items must also be spent in
# the new item
for coin in item.removals:
if coin.name() not in removals:
log.debug(f"Rejecting conflicting tx as it does not spend conflicting coin {coin.name()}")
return False
# New item must have higher fee per cost
conflicting_fees_per_cost = conflicting_fees / conflicting_cost
if fees_per_cost <= conflicting_fees_per_cost:
log.debug(
f"Rejecting conflicting tx due to not increasing fees per cost "
f"({fees_per_cost} <= {conflicting_fees_per_cost})"
)
return False
# New item must increase the total fee at least by a certain amount
fee_increase = fees - conflicting_fees
if fee_increase < self.get_min_fee_increase():
log.debug(f"Rejecting conflicting tx due to low fee increase ({fee_increase})")
return False
log.info(f"Replacing conflicting tx in mempool. New tx fee: {fees}, old tx fees: {conflicting_fees}")
return True
async def pre_validate_spendbundle(
self, new_spend: SpendBundle, new_spend_bytes: Optional[bytes], spend_name: bytes32
) -> NPCResult:
"""
Errors are included within the cached_result.
This runs in another process so we don't block the main thread
"""
start_time = time.time()
if new_spend_bytes is None:
new_spend_bytes = bytes(new_spend)
err, cached_result_bytes, new_cache_entries = await asyncio.get_running_loop().run_in_executor(
self.pool,
validate_clvm_and_signature,
new_spend_bytes,
int(self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM),
self.constants.COST_PER_BYTE,
self.constants.AGG_SIG_ME_ADDITIONAL_DATA,
)
if err is not None:
raise ValidationError(err)
for cache_entry_key, cached_entry_value in new_cache_entries.items():
LOCAL_CACHE.put(cache_entry_key, GTElement.from_bytes(cached_entry_value))
ret = NPCResult.from_bytes(cached_result_bytes)
end_time = time.time()
log.debug(f"pre_validate_spendbundle took {end_time - start_time:0.4f} seconds for {spend_name}")
return ret
async def add_spendbundle(
self,
new_spend: SpendBundle,
npc_result: NPCResult,
spend_name: bytes32,
program: Optional[SerializedProgram] = None,
) -> Tuple[Optional[uint64], MempoolInclusionStatus, Optional[Err]]:
"""
Tries to add spend bundle to the mempool
Returns the cost (if SUCCESS), the result (MempoolInclusion status), and an optional error
"""
start_time = time.time()
if self.peak is None:
return None, MempoolInclusionStatus.FAILED, Err.MEMPOOL_NOT_INITIALIZED
npc_list = npc_result.npc_list
assert npc_result.error is None
if program is None:
program = simple_solution_generator(new_spend).program
cost = calculate_cost_of_program(program, npc_result, self.constants.COST_PER_BYTE)
log.debug(f"Cost: {cost}")
if cost > int(self.limit_factor * self.constants.MAX_BLOCK_COST_CLVM):
# we shouldn't ever end up here, since the cost is limited when we
# execute the CLVM program.
return None, MempoolInclusionStatus.FAILED, Err.BLOCK_COST_EXCEEDS_MAX
# build removal list
removal_names: List[bytes32] = [npc.coin_name for npc in npc_list]
if set(removal_names) != set([s.name() for s in new_spend.removals()]):
return None, MempoolInclusionStatus.FAILED, Err.INVALID_SPEND_BUNDLE
additions = additions_for_npc(npc_list)
additions_dict: Dict[bytes32, Coin] = {}
for add in additions:
additions_dict[add.name()] = add
addition_amount = uint64(0)
# Check additions for max coin amount
for coin in additions:
if coin.amount < 0:
return (
None,
MempoolInclusionStatus.FAILED,
Err.COIN_AMOUNT_NEGATIVE,
)
if coin.amount > self.constants.MAX_COIN_AMOUNT:
return (
None,
MempoolInclusionStatus.FAILED,
Err.COIN_AMOUNT_EXCEEDS_MAXIMUM,
)
addition_amount = uint64(addition_amount + coin.amount)
# Check for duplicate outputs
addition_counter = collections.Counter(_.name() for _ in additions)
for k, v in addition_counter.items():
if v > 1:
return None, MempoolInclusionStatus.FAILED, Err.DUPLICATE_OUTPUT
# Check for duplicate inputs
removal_counter = collections.Counter(name for name in removal_names)
for k, v in removal_counter.items():
if v > 1:
return None, MempoolInclusionStatus.FAILED, Err.DOUBLE_SPEND
# Skip if already added
if spend_name in self.mempool.spends:
return uint64(cost), MempoolInclusionStatus.SUCCESS, None
removal_record_dict: Dict[bytes32, CoinRecord] = {}
removal_coin_dict: Dict[bytes32, Coin] = {}
removal_amount = uint64(0)
for name in removal_names:
removal_record = await self.coin_store.get_coin_record(name)
if removal_record is None and name not in additions_dict:
return None, MempoolInclusionStatus.FAILED, Err.UNKNOWN_UNSPENT
elif name in additions_dict:
removal_coin = additions_dict[name]
# TODO(straya): what timestamp to use here?
assert self.peak.timestamp is not None
removal_record = CoinRecord(
removal_coin,
uint32(self.peak.height + 1), # In mempool, so will be included in next height
uint32(0),
False,
False,
uint64(self.peak.timestamp + 1),
)
assert removal_record is not None
removal_amount = uint64(removal_amount + removal_record.coin.amount)
removal_record_dict[name] = removal_record
removal_coin_dict[name] = removal_record.coin
removals: List[Coin] = [coin for coin in removal_coin_dict.values()]
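        # A bundle may not create more value than it spends; the surplus of removals over additions is the fee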
if addition_amount > removal_amount:
print(addition_amount, removal_amount)
return None, MempoolInclusionStatus.FAILED, Err.MINTING_COIN
fees = uint64(removal_amount - addition_amount)
assert_fee_sum: uint64 = uint64(0)
for npc in npc_list:
if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
for cvp in fee_list:
fee = int_from_bytes(cvp.vars[0])
if fee < 0:
return None, MempoolInclusionStatus.FAILED, Err.RESERVE_FEE_CONDITION_FAILED
assert_fee_sum = assert_fee_sum + fee
if fees < assert_fee_sum:
return (
None,
MempoolInclusionStatus.FAILED,
Err.RESERVE_FEE_CONDITION_FAILED,
)
if cost == 0:
return None, MempoolInclusionStatus.FAILED, Err.UNKNOWN
fees_per_cost: float = fees / cost
# If pool is at capacity check the fee, if not then accept even without the fee
if self.mempool.at_full_capacity(cost):
if fees_per_cost < self.nonzero_fee_minimum_fpc:
return None, MempoolInclusionStatus.FAILED, Err.INVALID_FEE_TOO_CLOSE_TO_ZERO
if fees_per_cost <= self.mempool.get_min_fee_rate(cost):
return None, MempoolInclusionStatus.FAILED, Err.INVALID_FEE_LOW_FEE
# Check removals against UnspentDB + DiffStore + Mempool + SpendBundle
# Use this information later when constructing a block
fail_reason, conflicts = await self.check_removals(removal_record_dict)
# If there is a mempool conflict check if this spendbundle has a higher fee per cost than all others
tmp_error: Optional[Err] = None
conflicting_pool_items: Dict[bytes32, MempoolItem] = {}
if fail_reason is Err.MEMPOOL_CONFLICT:
for conflicting in conflicts:
sb: MempoolItem = self.mempool.removals[conflicting.name()]
conflicting_pool_items[sb.name] = sb
if not self.can_replace(conflicting_pool_items, removal_record_dict, fees, fees_per_cost):
potential = MempoolItem(
new_spend, uint64(fees), npc_result, cost, spend_name, additions, removals, program
)
self.potential_cache.add(potential)
return (
uint64(cost),
MempoolInclusionStatus.PENDING,
Err.MEMPOOL_CONFLICT,
)
elif fail_reason:
return None, MempoolInclusionStatus.FAILED, fail_reason
if tmp_error:
return None, MempoolInclusionStatus.FAILED, tmp_error
# Verify conditions, create hash_key list for aggsig check
error: Optional[Err] = None
for npc in npc_list:
coin_record: CoinRecord = removal_record_dict[npc.coin_name]
# Check that the revealed removal puzzles actually match the puzzle hash
if npc.puzzle_hash != coin_record.coin.puzzle_hash:
log.warning("Mempool rejecting transaction because of wrong puzzle_hash")
log.warning(f"{npc.puzzle_hash} != {coin_record.coin.puzzle_hash}")
return None, MempoolInclusionStatus.FAILED, Err.WRONG_PUZZLE_HASH
salvialisp_height = (
self.peak.prev_transaction_block_height if not self.peak.is_transaction_block else self.peak.height
)
assert self.peak.timestamp is not None
error = mempool_check_conditions_dict(
coin_record,
npc.condition_dict,
uint32(salvialisp_height),
self.peak.timestamp,
)
if error:
if error is Err.ASSERT_HEIGHT_ABSOLUTE_FAILED or error is Err.ASSERT_HEIGHT_RELATIVE_FAILED:
potential = MempoolItem(
new_spend, uint64(fees), npc_result, cost, spend_name, additions, removals, program
)
self.potential_cache.add(potential)
return uint64(cost), MempoolInclusionStatus.PENDING, error
break
if error:
return None, MempoolInclusionStatus.FAILED, error
# Remove all conflicting Coins and SpendBundles
if fail_reason:
mempool_item: MempoolItem
for mempool_item in conflicting_pool_items.values():
self.mempool.remove_from_pool(mempool_item)
new_item = MempoolItem(new_spend, uint64(fees), npc_result, cost, spend_name, additions, removals, program)
self.mempool.add_to_pool(new_item)
now = time.time()
log.log(
logging.DEBUG,
f"add_spendbundle {spend_name} took {now - start_time:0.2f} seconds. "
f"Cost: {cost} ({round(100.0 * cost/self.constants.MAX_BLOCK_COST_CLVM, 3)}% of max block cost)",
)
return uint64(cost), MempoolInclusionStatus.SUCCESS, None
async def check_removals(self, removals: Dict[bytes32, CoinRecord]) -> Tuple[Optional[Err], List[Coin]]:
"""
This function checks for double spends, unknown spends and conflicting transactions in mempool.
        Returns an error (if any) and the list of coins that conflict with transactions already in the mempool.
Note that additions are not checked for duplicates, because having duplicate additions requires also
having duplicate removals.
"""
assert self.peak is not None
conflicts: List[Coin] = []
for record in removals.values():
removal = record.coin
# 1. Checks if it's been spent already
if record.spent == 1:
return Err.DOUBLE_SPEND, []
# 2. Checks if there's a mempool conflict
if removal.name() in self.mempool.removals:
conflicts.append(removal)
if len(conflicts) > 0:
return Err.MEMPOOL_CONFLICT, conflicts
        # 3. If all coins can be spent, there are no errors or conflicts to report
return None, []
def get_spendbundle(self, bundle_hash: bytes32) -> Optional[SpendBundle]:
"""Returns a full SpendBundle if it's inside one the mempools"""
if bundle_hash in self.mempool.spends:
return self.mempool.spends[bundle_hash].spend_bundle
return None
def get_mempool_item(self, bundle_hash: bytes32) -> Optional[MempoolItem]:
"""Returns a MempoolItem if it's inside one the mempools"""
if bundle_hash in self.mempool.spends:
return self.mempool.spends[bundle_hash]
return None
async def new_peak(
self, new_peak: Optional[BlockRecord], coin_changes: List[CoinRecord]
) -> List[Tuple[SpendBundle, NPCResult, bytes32]]:
"""
Called when a new peak is available, we try to recreate a mempool for the new tip.
"""
if new_peak is None:
return []
if new_peak.is_transaction_block is False:
return []
if self.peak == new_peak:
return []
assert new_peak.timestamp is not None
use_optimization: bool = self.peak is not None and new_peak.prev_transaction_block_hash == self.peak.header_hash
self.peak = new_peak
if use_optimization:
changed_coins_set: Set[bytes32] = set()
for coin_record in coin_changes:
changed_coins_set.add(coin_record.coin.name())
old_pool = self.mempool
self.mempool = Mempool(self.mempool_max_total_cost)
for item in old_pool.spends.values():
if use_optimization:
                # If use_optimization, we automatically re-add every bundle none of whose removals were spent
                # (since we only advanced one transaction block). This is a nice benefit of the coin set model
                # over the account model: all such spends are guaranteed to still succeed.
failed = False
for removed_coin in item.removals:
if removed_coin.name() in changed_coins_set:
failed = True
break
if not failed:
self.mempool.add_to_pool(item)
else:
# If the spend bundle was confirmed or conflicting (can no longer be in mempool), it won't be
# successfully added to the new mempool. In this case, remove it from seen, so in the case of a
# reorg, it can be resubmitted
self.remove_seen(item.spend_bundle_name)
else:
_, result, _ = await self.add_spendbundle(
item.spend_bundle, item.npc_result, item.spend_bundle_name, item.program
)
# If the spend bundle was confirmed or conflicting (can no longer be in mempool), it won't be
# successfully added to the new mempool. In this case, remove it from seen, so in the case of a reorg,
# it can be resubmitted
if result != MempoolInclusionStatus.SUCCESS:
self.remove_seen(item.spend_bundle_name)
potential_txs = self.potential_cache.drain()
txs_added = []
for item in potential_txs.values():
cost, status, error = await self.add_spendbundle(
item.spend_bundle, item.npc_result, item.spend_bundle_name, program=item.program
)
if status == MempoolInclusionStatus.SUCCESS:
txs_added.append((item.spend_bundle, item.npc_result, item.spend_bundle_name))
log.info(
f"Size of mempool: {len(self.mempool.spends)} spends, cost: {self.mempool.total_mempool_cost} "
f"minimum fee to get in: {self.mempool.get_min_fee_rate(100000)}"
)
return txs_added
async def get_items_not_in_filter(self, mempool_filter: PyBIP158, limit: int = 100) -> List[MempoolItem]:
items: List[MempoolItem] = []
counter = 0
broke_from_inner_loop = False
# Send 100 with highest fee per cost
for dic in self.mempool.sorted_spends.values():
if broke_from_inner_loop:
break
for item in dic.values():
if counter == limit:
broke_from_inner_loop = True
break
if mempool_filter.Match(bytearray(item.spend_bundle_name)):
continue
items.append(item)
counter += 1
return items
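# Illustrative sketch (not part of the original module): the replace-by-fee arithmetic used by
# MempoolManager.can_replace, with made-up fees and costs instead of real MempoolItems.
if __name__ == "__main__":
    conflicting_fees, conflicting_cost = 20_000_000, 1_000_000
    new_fees, new_cost = 40_000_000, 1_000_000
    higher_rate = (new_fees / new_cost) > (conflicting_fees / conflicting_cost)
    enough_increase = (new_fees - conflicting_fees) >= MempoolManager.get_min_fee_increase()
    # Both must hold (and the new spend must remove a superset of the conflicting coins) to replace
    print(f"can replace: {higher_rate and enough_increase}")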
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/mempool_manager.py
| 0.835852 | 0.259269 |
mempool_manager.py
|
pypi
|
import asyncio
import dataclasses
import logging
import time
from typing import Dict, List, Optional, Set, Tuple
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.difficulty_adjustment import can_finish_sub_and_full_epoch
from salvia.consensus.make_sub_epoch_summary import next_sub_epoch_summary
from salvia.consensus.multiprocess_validation import PreValidationResult
from salvia.consensus.pot_iterations import calculate_sp_interval_iters
from salvia.full_node.signage_point import SignagePoint
from salvia.protocols import timelord_protocol
from salvia.server.outbound_message import Message
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.blockchain_format.vdf import VDFInfo
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.types.full_block import FullBlock
from salvia.types.generator_types import CompressorArg
from salvia.types.unfinished_block import UnfinishedBlock
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.lru_cache import LRUCache
from salvia.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True)
@streamable
class FullNodeStorePeakResult(Streamable):
added_eos: Optional[EndOfSubSlotBundle]
new_signage_points: List[Tuple[uint8, SignagePoint]]
new_infusion_points: List[timelord_protocol.NewInfusionPointVDF]
class FullNodeStore:
constants: ConsensusConstants
# Blocks which we have created, but don't have plot signatures yet, so not yet "unfinished blocks"
candidate_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
candidate_backup_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock]]
# Header hashes of unfinished blocks that we have seen recently
seen_unfinished_blocks: set
# Unfinished blocks, keyed from reward hash
unfinished_blocks: Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]
# Finished slots and sps from the peak's slot onwards
# We store all 32 SPs for each slot, starting as 32 Nones and filling them as we go
# Also stores the total iters at the end of slot
# For the first sub-slot, EndOfSlotBundle is None
finished_sub_slots: List[Tuple[Optional[EndOfSubSlotBundle], List[Optional[SignagePoint]], uint128]]
# These caches maintain objects which depend on infused blocks in the reward chain, that we
# might receive before the blocks themselves. The dict keys are the reward chain challenge hashes.
# End of slots which depend on infusions that we don't have
future_eos_cache: Dict[bytes32, List[EndOfSubSlotBundle]]
# Signage points which depend on infusions that we don't have
future_sp_cache: Dict[bytes32, List[Tuple[uint8, SignagePoint]]]
# Infusion point VDFs which depend on infusions that we don't have
future_ip_cache: Dict[bytes32, List[timelord_protocol.NewInfusionPointVDF]]
# This stores the time that each key was added to the future cache, so we can clear old keys
future_cache_key_times: Dict[bytes32, int]
# These recent caches are for pooling support
recent_signage_points: LRUCache
recent_eos: LRUCache
# Partial hashes of unfinished blocks we are requesting
requesting_unfinished_blocks: Set[bytes32]
previous_generator: Optional[CompressorArg]
pending_tx_request: Dict[bytes32, bytes32] # tx_id: peer_id
    peers_with_tx: Dict[bytes32, Set[bytes32]]  # tx_id: Set[peer_ids]
tx_fetch_tasks: Dict[bytes32, asyncio.Task] # Task id: task
serialized_wp_message: Optional[Message]
serialized_wp_message_tip: Optional[bytes32]
def __init__(self, constants: ConsensusConstants):
self.candidate_blocks = {}
self.candidate_backup_blocks = {}
self.seen_unfinished_blocks = set()
self.unfinished_blocks = {}
self.finished_sub_slots = []
self.future_eos_cache = {}
self.future_sp_cache = {}
self.future_ip_cache = {}
self.recent_signage_points = LRUCache(500)
self.recent_eos = LRUCache(50)
self.requesting_unfinished_blocks = set()
self.previous_generator = None
self.future_cache_key_times = {}
self.constants = constants
self.clear_slots()
self.initialize_genesis_sub_slot()
self.pending_tx_request = {}
self.peers_with_tx = {}
self.tx_fetch_tasks = {}
self.serialized_wp_message = None
self.serialized_wp_message_tip = None
def add_candidate_block(
self, quality_string: bytes32, height: uint32, unfinished_block: UnfinishedBlock, backup: bool = False
):
if backup:
self.candidate_backup_blocks[quality_string] = (height, unfinished_block)
else:
self.candidate_blocks[quality_string] = (height, unfinished_block)
def get_candidate_block(
self, quality_string: bytes32, backup: bool = False
) -> Optional[Tuple[uint32, UnfinishedBlock]]:
if backup:
return self.candidate_backup_blocks.get(quality_string, None)
else:
return self.candidate_blocks.get(quality_string, None)
def clear_candidate_blocks_below(self, height: uint32) -> None:
del_keys = []
for key, value in self.candidate_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_blocks[key]
except KeyError:
pass
del_keys = []
for key, value in self.candidate_backup_blocks.items():
if value[0] < height:
del_keys.append(key)
for key in del_keys:
try:
del self.candidate_backup_blocks[key]
except KeyError:
pass
def seen_unfinished_block(self, object_hash: bytes32) -> bool:
if object_hash in self.seen_unfinished_blocks:
return True
self.seen_unfinished_blocks.add(object_hash)
return False
def clear_seen_unfinished_blocks(self) -> None:
self.seen_unfinished_blocks.clear()
def add_unfinished_block(
self, height: uint32, unfinished_block: UnfinishedBlock, result: PreValidationResult
) -> None:
self.unfinished_blocks[unfinished_block.partial_hash] = (height, unfinished_block, result)
def get_unfinished_block(self, unfinished_reward_hash: bytes32) -> Optional[UnfinishedBlock]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[1]
def get_unfinished_block_result(self, unfinished_reward_hash: bytes32) -> Optional[PreValidationResult]:
result = self.unfinished_blocks.get(unfinished_reward_hash, None)
if result is None:
return None
return result[2]
def get_unfinished_blocks(self) -> Dict[bytes32, Tuple[uint32, UnfinishedBlock, PreValidationResult]]:
return self.unfinished_blocks
def clear_unfinished_blocks_below(self, height: uint32) -> None:
del_keys: List[bytes32] = []
for partial_reward_hash, (unf_height, unfinished_block, _) in self.unfinished_blocks.items():
if unf_height < height:
del_keys.append(partial_reward_hash)
for del_key in del_keys:
del self.unfinished_blocks[del_key]
def remove_unfinished_block(self, partial_reward_hash: bytes32):
if partial_reward_hash in self.unfinished_blocks:
del self.unfinished_blocks[partial_reward_hash]
def add_to_future_ip(self, infusion_point: timelord_protocol.NewInfusionPointVDF):
ch: bytes32 = infusion_point.reward_chain_ip_vdf.challenge
if ch not in self.future_ip_cache:
self.future_ip_cache[ch] = []
self.future_ip_cache[ch].append(infusion_point)
def in_future_sp_cache(self, signage_point: SignagePoint, index: uint8) -> bool:
if signage_point.rc_vdf is None:
return False
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
return False
for cache_index, cache_sp in self.future_sp_cache[signage_point.rc_vdf.challenge]:
if cache_index == index and cache_sp.rc_vdf == signage_point.rc_vdf:
return True
return False
def add_to_future_sp(self, signage_point: SignagePoint, index: uint8):
# We are missing a block here
if (
signage_point.cc_vdf is None
or signage_point.rc_vdf is None
or signage_point.cc_proof is None
or signage_point.rc_proof is None
):
return None
if signage_point.rc_vdf.challenge not in self.future_sp_cache:
self.future_sp_cache[signage_point.rc_vdf.challenge] = []
if self.in_future_sp_cache(signage_point, index):
return None
self.future_cache_key_times[signage_point.rc_vdf.challenge] = int(time.time())
self.future_sp_cache[signage_point.rc_vdf.challenge].append((index, signage_point))
log.info(f"Don't have rc hash {signage_point.rc_vdf.challenge}. caching signage point {index}.")
def get_future_ip(self, rc_challenge_hash: bytes32) -> List[timelord_protocol.NewInfusionPointVDF]:
return self.future_ip_cache.get(rc_challenge_hash, [])
def clear_old_cache_entries(self) -> None:
current_time: int = int(time.time())
remove_keys: List[bytes32] = []
for rc_hash, time_added in self.future_cache_key_times.items():
if current_time - time_added > 3600:
remove_keys.append(rc_hash)
for k in remove_keys:
self.future_cache_key_times.pop(k, None)
self.future_ip_cache.pop(k, [])
self.future_eos_cache.pop(k, [])
self.future_sp_cache.pop(k, [])
def clear_slots(self):
self.finished_sub_slots.clear()
def get_sub_slot(self, challenge_hash: bytes32) -> Optional[Tuple[EndOfSubSlotBundle, int, uint128]]:
assert len(self.finished_sub_slots) >= 1
for index, (sub_slot, _, total_iters) in enumerate(self.finished_sub_slots):
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == challenge_hash:
return sub_slot, index, total_iters
return None
def initialize_genesis_sub_slot(self):
self.clear_slots()
self.finished_sub_slots = [(None, [None] * self.constants.NUM_SPS_SUB_SLOT, uint128(0))]
def new_finished_sub_slot(
self,
eos: EndOfSubSlotBundle,
blocks: BlockchainInterface,
peak: Optional[BlockRecord],
peak_full_block: Optional[FullBlock],
) -> Optional[List[timelord_protocol.NewInfusionPointVDF]]:
"""
        Returns None if the slot was not added. Returns a list if it was added; the list contains all
        infusion points that depended on this sub-slot.
"""
assert len(self.finished_sub_slots) >= 1
assert (peak is None) == (peak_full_block is None)
last_slot, _, last_slot_iters = self.finished_sub_slots[-1]
cc_challenge: bytes32 = (
last_slot.challenge_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
rc_challenge: bytes32 = (
last_slot.reward_chain.get_hash() if last_slot is not None else self.constants.GENESIS_CHALLENGE
)
icc_challenge: Optional[bytes32] = None
icc_iters: Optional[uint64] = None
# Skip if already present
for slot, _, _ in self.finished_sub_slots:
if slot == eos:
return []
if eos.challenge_chain.challenge_chain_end_of_slot_vdf.challenge != cc_challenge:
# This slot does not append to our next slot
            # This prevents other peers from appending fake VDFs to our cache
return None
if peak is None:
sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
else:
sub_slot_iters = peak.sub_slot_iters
total_iters = uint128(last_slot_iters + sub_slot_iters)
if peak is not None and peak.total_iters > last_slot_iters:
# Peak is in this slot
# Note: Adding an end of subslot does not lock the blockchain, for performance reasons. Only the
# timelord_lock is used. Therefore, it's possible that we add a new peak at the same time as seeing
# the finished subslot, and the peak is not fully added yet, so it looks like we still need the subslot.
# In that case, we will exit here and let the new_peak code add the subslot.
if total_iters < peak.total_iters:
return None
rc_challenge = eos.reward_chain.end_of_slot_vdf.challenge
cc_start_element = peak.challenge_vdf_output
iters = uint64(total_iters - peak.total_iters)
if peak.reward_infusion_new_challenge != rc_challenge:
# We don't have this challenge hash yet
if rc_challenge not in self.future_eos_cache:
self.future_eos_cache[rc_challenge] = []
self.future_eos_cache[rc_challenge].append(eos)
self.future_cache_key_times[rc_challenge] = int(time.time())
log.info(f"Don't have challenge hash {rc_challenge}, caching EOS")
return None
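            # Choose the ICC VDF start element from the peak's deficit: at the maximum deficit there is
            # no ICC start element, one below the maximum starts from the default (identity) element,
            # and otherwise it continues from the peak's infused challenge VDF output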
if peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
icc_start_element = None
elif peak.deficit == self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK - 1:
icc_start_element = ClassgroupElement.get_default_element()
else:
icc_start_element = peak.infused_challenge_vdf_output
if peak.deficit < self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK:
curr = peak
while not curr.first_in_sub_slot and not curr.is_challenge_block(self.constants):
curr = blocks.block_record(curr.prev_hash)
if curr.is_challenge_block(self.constants):
icc_challenge = curr.challenge_block_info_hash
icc_iters = uint64(total_iters - curr.total_iters)
else:
assert curr.finished_infused_challenge_slot_hashes is not None
icc_challenge = curr.finished_infused_challenge_slot_hashes[-1]
icc_iters = sub_slot_iters
assert icc_challenge is not None
if can_finish_sub_and_full_epoch(
self.constants,
blocks,
peak.height,
peak.prev_hash,
peak.deficit,
peak.sub_epoch_summary_included is not None,
)[0]:
assert peak_full_block is not None
ses: Optional[SubEpochSummary] = next_sub_epoch_summary(
self.constants, blocks, peak.required_iters, peak_full_block, True
)
if ses is not None:
if eos.challenge_chain.subepoch_summary_hash != ses.get_hash():
log.warning(f"SES not correct {ses.get_hash(), eos.challenge_chain}")
return None
else:
if eos.challenge_chain.subepoch_summary_hash is not None:
log.warning("SES not correct, should be None")
return None
else:
# This is on an empty slot
cc_start_element = ClassgroupElement.get_default_element()
icc_start_element = ClassgroupElement.get_default_element()
iters = sub_slot_iters
icc_iters = sub_slot_iters
# The icc should only be present if the previous slot had an icc too, and not deficit 0 (just finished slot)
icc_challenge = (
last_slot.infused_challenge_chain.get_hash()
if last_slot is not None
and last_slot.infused_challenge_chain is not None
and last_slot.reward_chain.deficit != self.constants.MIN_BLOCKS_PER_CHALLENGE_BLOCK
else None
)
# Validate cc VDF
partial_cc_vdf_info = VDFInfo(
cc_challenge,
iters,
eos.challenge_chain.challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.challenge_chain.challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_cc_vdf_info,
number_of_iterations=sub_slot_iters,
):
return None
if (
not eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
cc_start_element,
partial_cc_vdf_info,
)
):
return None
if (
eos.proofs.challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.challenge_chain.challenge_chain_end_of_slot_vdf,
)
):
return None
# Validate reward chain VDF
if not eos.proofs.reward_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.reward_chain.end_of_slot_vdf,
VDFInfo(rc_challenge, iters, eos.reward_chain.end_of_slot_vdf.output),
):
return None
if icc_challenge is not None:
assert icc_start_element is not None
assert icc_iters is not None
assert eos.infused_challenge_chain is not None
assert eos.infused_challenge_chain is not None
assert eos.proofs.infused_challenge_chain_slot_proof is not None
partial_icc_vdf_info = VDFInfo(
icc_challenge,
iters,
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf.output,
)
# The EOS will have the whole sub-slot iters, but the proof is only the delta, from the last peak
if eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf != dataclasses.replace(
partial_icc_vdf_info,
number_of_iterations=icc_iters,
):
return None
if (
not eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants, icc_start_element, partial_icc_vdf_info
)
):
return None
if (
eos.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
and not eos.proofs.infused_challenge_chain_slot_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
eos.infused_challenge_chain.infused_challenge_chain_end_of_slot_vdf,
)
):
return None
else:
# This is the first sub slot and it's empty, therefore there is no ICC
if eos.infused_challenge_chain is not None or eos.proofs.infused_challenge_chain_slot_proof is not None:
return None
self.finished_sub_slots.append((eos, [None] * self.constants.NUM_SPS_SUB_SLOT, total_iters))
new_cc_hash = eos.challenge_chain.get_hash()
self.recent_eos.put(new_cc_hash, (eos, time.time()))
new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
for ip in self.future_ip_cache.get(eos.reward_chain.get_hash(), []):
new_ips.append(ip)
return new_ips
def new_signage_point(
self,
index: uint8,
blocks: BlockchainInterface,
peak: Optional[BlockRecord],
next_sub_slot_iters: uint64,
signage_point: SignagePoint,
skip_vdf_validation=False,
) -> bool:
"""
Returns true if sp successfully added
"""
assert len(self.finished_sub_slots) >= 1
if peak is None or peak.height < 2:
sub_slot_iters = self.constants.SUB_SLOT_ITERS_STARTING
else:
sub_slot_iters = peak.sub_slot_iters
# If we don't have this slot, return False
if index == 0 or index >= self.constants.NUM_SPS_SUB_SLOT:
return False
assert (
signage_point.cc_vdf is not None
and signage_point.cc_proof is not None
and signage_point.rc_vdf is not None
and signage_point.rc_proof is not None
)
for sub_slot, sp_arr, start_ss_total_iters in self.finished_sub_slots:
if sub_slot is None:
assert start_ss_total_iters == 0
ss_challenge_hash = self.constants.GENESIS_CHALLENGE
ss_reward_hash = self.constants.GENESIS_CHALLENGE
else:
ss_challenge_hash = sub_slot.challenge_chain.get_hash()
ss_reward_hash = sub_slot.reward_chain.get_hash()
if ss_challenge_hash == signage_point.cc_vdf.challenge:
# If we do have this slot, find the Prev block from SP and validate SP
if peak is not None and start_ss_total_iters > peak.total_iters:
# We are in a future sub slot from the peak, so maybe there is a new SSI
checkpoint_size: uint64 = uint64(next_sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
delta_iters: uint64 = uint64(checkpoint_size * index)
future_sub_slot: bool = True
else:
# We are not in a future sub slot from the peak, so there is no new SSI
checkpoint_size = uint64(sub_slot_iters // self.constants.NUM_SPS_SUB_SLOT)
delta_iters = uint64(checkpoint_size * index)
future_sub_slot = False
sp_total_iters = start_ss_total_iters + delta_iters
curr = peak
if peak is None or future_sub_slot:
check_from_start_of_ss = True
else:
check_from_start_of_ss = False
while (
curr is not None
and curr.total_iters > start_ss_total_iters
and curr.total_iters > sp_total_iters
):
if curr.first_in_sub_slot:
                            # Did not find a block whose iters are before our sp_total_iters, in this sub-slot
check_from_start_of_ss = True
break
curr = blocks.block_record(curr.prev_hash)
if check_from_start_of_ss:
# Check VDFs from start of sub slot
cc_vdf_info_expected = VDFInfo(
ss_challenge_hash,
delta_iters,
signage_point.cc_vdf.output,
)
rc_vdf_info_expected = VDFInfo(
ss_reward_hash,
delta_iters,
signage_point.rc_vdf.output,
)
else:
# Check VDFs from curr
assert curr is not None
cc_vdf_info_expected = VDFInfo(
ss_challenge_hash,
uint64(sp_total_iters - curr.total_iters),
signage_point.cc_vdf.output,
)
rc_vdf_info_expected = VDFInfo(
curr.reward_infusion_new_challenge,
uint64(sp_total_iters - curr.total_iters),
signage_point.rc_vdf.output,
)
if not signage_point.cc_vdf == dataclasses.replace(
cc_vdf_info_expected, number_of_iterations=delta_iters
):
self.add_to_future_sp(signage_point, index)
return False
if check_from_start_of_ss:
start_ele = ClassgroupElement.get_default_element()
else:
assert curr is not None
start_ele = curr.challenge_vdf_output
if not skip_vdf_validation:
if not signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
self.constants,
start_ele,
cc_vdf_info_expected,
):
self.add_to_future_sp(signage_point, index)
return False
if signage_point.cc_proof.normalized_to_identity and not signage_point.cc_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
signage_point.cc_vdf,
):
self.add_to_future_sp(signage_point, index)
return False
if rc_vdf_info_expected.challenge != signage_point.rc_vdf.challenge:
# This signage point is probably outdated
self.add_to_future_sp(signage_point, index)
return False
if not skip_vdf_validation:
if not signage_point.rc_proof.is_valid(
self.constants,
ClassgroupElement.get_default_element(),
signage_point.rc_vdf,
rc_vdf_info_expected,
):
self.add_to_future_sp(signage_point, index)
return False
sp_arr[index] = signage_point
self.recent_signage_points.put(signage_point.cc_vdf.output.get_hash(), (signage_point, time.time()))
return True
self.add_to_future_sp(signage_point, index)
return False
def get_signage_point(self, cc_signage_point: bytes32) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
if cc_signage_point == self.constants.GENESIS_CHALLENGE:
return SignagePoint(None, None, None, None)
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None and sub_slot.challenge_chain.get_hash() == cc_signage_point:
return SignagePoint(None, None, None, None)
for sp in sps:
if sp is not None:
assert sp.cc_vdf is not None
if sp.cc_vdf.output.get_hash() == cc_signage_point:
return sp
return None
def get_signage_point_by_index(
self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32
) -> Optional[SignagePoint]:
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
if index == 0:
return SignagePoint(None, None, None, None)
sp: Optional[SignagePoint] = sps[index]
if sp is not None:
assert sp.rc_vdf is not None
if sp.rc_vdf.challenge == last_rc_infusion:
return sp
return None
return None
def have_newer_signage_point(self, challenge_hash: bytes32, index: uint8, last_rc_infusion: bytes32) -> bool:
"""
Returns true if we have a signage point at this index which is based on a newer infusion.
"""
assert len(self.finished_sub_slots) >= 1
for sub_slot, sps, _ in self.finished_sub_slots:
if sub_slot is not None:
cc_hash = sub_slot.challenge_chain.get_hash()
else:
cc_hash = self.constants.GENESIS_CHALLENGE
if cc_hash == challenge_hash:
found_rc_hash = False
for i in range(0, index):
sp: Optional[SignagePoint] = sps[i]
if sp is not None and sp.rc_vdf is not None and sp.rc_vdf.challenge == last_rc_infusion:
found_rc_hash = True
sp = sps[index]
if (
found_rc_hash
and sp is not None
and sp.rc_vdf is not None
and sp.rc_vdf.challenge != last_rc_infusion
):
return True
return False
def new_peak(
self,
peak: BlockRecord,
peak_full_block: FullBlock,
sp_sub_slot: Optional[EndOfSubSlotBundle], # None if not overflow, or in first/second slot
ip_sub_slot: Optional[EndOfSubSlotBundle], # None if in first slot
fork_block: Optional[BlockRecord],
blocks: BlockchainInterface,
) -> FullNodeStorePeakResult:
"""
If the peak is an overflow block, must provide two sub-slots: one for the current sub-slot and one for
the prev sub-slot (since we still might get more blocks with an sp in the previous sub-slot)
Results in either one or two sub-slots in finished_sub_slots.
"""
assert len(self.finished_sub_slots) >= 1
if ip_sub_slot is None:
            # We are still in the first sub-slot, no new sub slots yet
self.initialize_genesis_sub_slot()
else:
# This is not the first sub-slot in the chain
sp_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
ip_sub_slot_sps: List[Optional[SignagePoint]] = [None] * self.constants.NUM_SPS_SUB_SLOT
if fork_block is not None and fork_block.sub_slot_iters != peak.sub_slot_iters:
# If there was a reorg and a difficulty adjustment, just clear all the slots
self.clear_slots()
else:
interval_iters = calculate_sp_interval_iters(self.constants, peak.sub_slot_iters)
# If it's not a reorg, or there is a reorg on the same difficulty, we can keep signage points
# that we had before, in the cache
for index, (sub_slot, sps, total_iters) in enumerate(self.finished_sub_slots):
if sub_slot is None:
continue
if fork_block is None:
# If this is not a reorg, we still want to remove signage points after the new peak
fork_block = peak
replaced_sps: List[Optional[SignagePoint]] = [] # index 0 is the end of sub slot
for i, sp in enumerate(sps):
if (total_iters + i * interval_iters) < fork_block.total_iters:
                            # Sps before the fork point are still valid
replaced_sps.append(sp)
else:
if sp is not None:
log.debug(
f"Reverting {i} {(total_iters + i * interval_iters)} {fork_block.total_iters}"
)
# Sps after the fork point should be removed
replaced_sps.append(None)
assert len(sps) == len(replaced_sps)
if sub_slot == sp_sub_slot:
sp_sub_slot_sps = replaced_sps
if sub_slot == ip_sub_slot:
ip_sub_slot_sps = replaced_sps
self.clear_slots()
prev_sub_slot_total_iters = peak.sp_sub_slot_total_iters(self.constants)
if sp_sub_slot is not None or prev_sub_slot_total_iters == 0:
assert peak.overflow or prev_sub_slot_total_iters
self.finished_sub_slots.append((sp_sub_slot, sp_sub_slot_sps, prev_sub_slot_total_iters))
ip_sub_slot_total_iters = peak.ip_sub_slot_total_iters(self.constants)
self.finished_sub_slots.append((ip_sub_slot, ip_sub_slot_sps, ip_sub_slot_total_iters))
new_eos: Optional[EndOfSubSlotBundle] = None
new_sps: List[Tuple[uint8, SignagePoint]] = []
new_ips: List[timelord_protocol.NewInfusionPointVDF] = []
future_eos: List[EndOfSubSlotBundle] = self.future_eos_cache.get(peak.reward_infusion_new_challenge, []).copy()
for eos in future_eos:
if self.new_finished_sub_slot(eos, blocks, peak, peak_full_block) is not None:
new_eos = eos
break
future_sps: List[Tuple[uint8, SignagePoint]] = self.future_sp_cache.get(
peak.reward_infusion_new_challenge, []
).copy()
for index, sp in future_sps:
assert sp.cc_vdf is not None
if self.new_signage_point(index, blocks, peak, peak.sub_slot_iters, sp):
new_sps.append((index, sp))
for ip in self.future_ip_cache.get(peak.reward_infusion_new_challenge, []):
new_ips.append(ip)
self.future_eos_cache.pop(peak.reward_infusion_new_challenge, [])
self.future_sp_cache.pop(peak.reward_infusion_new_challenge, [])
self.future_ip_cache.pop(peak.reward_infusion_new_challenge, [])
for eos_op, _, _ in self.finished_sub_slots:
if eos_op is not None:
self.recent_eos.put(eos_op.challenge_chain.get_hash(), (eos_op, time.time()))
return FullNodeStorePeakResult(new_eos, new_sps, new_ips)
def get_finished_sub_slots(
self,
block_records: BlockchainInterface,
prev_b: Optional[BlockRecord],
last_challenge_to_add: bytes32,
) -> Optional[List[EndOfSubSlotBundle]]:
"""
Retrieves the EndOfSubSlotBundles that are in the store either:
1. From the starting challenge if prev_b is None
2. That are not included in the blockchain with peak of prev_b if prev_b is not None
        Stops at last_challenge_to_add
"""
if prev_b is None:
# The first sub slot must be None
assert self.finished_sub_slots[0][0] is None
challenge_in_chain: bytes32 = self.constants.GENESIS_CHALLENGE
else:
curr: BlockRecord = prev_b
while not curr.first_in_sub_slot:
curr = block_records.block_record(curr.prev_hash)
assert curr is not None
assert curr.finished_challenge_slot_hashes is not None
challenge_in_chain = curr.finished_challenge_slot_hashes[-1]
if last_challenge_to_add == challenge_in_chain:
# No additional slots to add
return []
collected_sub_slots: List[EndOfSubSlotBundle] = []
found_last_challenge = False
found_connecting_challenge = False
for sub_slot, sps, total_iters in self.finished_sub_slots[1:]:
assert sub_slot is not None
if sub_slot.challenge_chain.challenge_chain_end_of_slot_vdf.challenge == challenge_in_chain:
found_connecting_challenge = True
if found_connecting_challenge:
collected_sub_slots.append(sub_slot)
if found_connecting_challenge and sub_slot.challenge_chain.get_hash() == last_challenge_to_add:
found_last_challenge = True
break
if not found_last_challenge:
log.warning(f"Did not find hash {last_challenge_to_add} connected to " f"{challenge_in_chain}")
return None
return collected_sub_slots
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/full_node_store.py
import logging
from typing import List, Optional, Union, Tuple
from salvia.types.blockchain_format.program import Program, SerializedProgram
from salvia.types.generator_types import BlockGenerator, GeneratorArg, GeneratorBlockCacheInterface, CompressorArg
from salvia.util.ints import uint32, uint64
from salvia.wallet.puzzles.load_clvm import load_clvm
from salvia.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="salvia.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="salvia.wallet.puzzles")
# DECOMPRESS_CSE = load_clvm("decompress_coin_spend_entry.clvm", package_or_requirement="salvia.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="salvia.wallet.puzzles"
)
log = logging.getLogger(__name__)
def create_block_generator(
generator: SerializedProgram, block_heights_list: List[uint32], generator_block_cache: GeneratorBlockCacheInterface
) -> Optional[BlockGenerator]:
"""`create_block_generator` will returns None if it fails to look up any referenced block"""
generator_arg_list: List[GeneratorArg] = []
for i in block_heights_list:
previous_generator = generator_block_cache.get_generator_for_block_height(i)
if previous_generator is None:
log.error(f"Failed to look up generator for block {i}. Ref List: {block_heights_list}")
return None
generator_arg_list.append(GeneratorArg(i, previous_generator))
return BlockGenerator(generator, generator_arg_list)
def create_generator_args(generator_ref_list: List[SerializedProgram]) -> Program:
"""
`create_generator_args`: The format and contents of these arguments affect consensus.
"""
gen_ref_list = [bytes(g) for g in generator_ref_list]
return Program.to([gen_ref_list])
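# Illustrative note (added commentary, not part of the original module). Per the body of
# create_generator_args above, the argument program is a one-element list wrapping the list
# of serialized refs. A minimal hypothetical check, using a nil program as a placeholder ref:
#
#   ref = SerializedProgram.from_bytes(bytes.fromhex("80"))
#   args = create_generator_args([ref])
#   assert args.first().first().as_atom() == bytes(ref)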
def create_compressed_generator(
original_generator: CompressorArg,
compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]],
) -> BlockGenerator:
"""
Bind the generator block program template to a particular reference block,
template bytes offsets, and SpendBundle.
"""
start = original_generator.start
end = original_generator.end
program = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list
)
generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator)
return BlockGenerator(program, [generator_arg])
def setup_generator_args(self: BlockGenerator) -> Tuple[SerializedProgram, Program]:
args = create_generator_args(self.generator_refs())
return self.program, args
def run_generator(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_safe_with_cost(max_cost, program, args)
def run_generator_unsafe(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
"""This mode is meant for accepting possibly soft-forked transactions into the mempool"""
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_with_cost(max_cost, program, args)
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/generator.py
import asyncio
import dataclasses
import logging
import traceback
from typing import Awaitable, Callable
log = logging.getLogger(__name__)
@dataclasses.dataclass(frozen=True, order=True)
class PrioritizedCallable:
priority: int
af: Callable[[], Awaitable[object]] = dataclasses.field(compare=False)
class LockQueue:
"""
The purpose of this class is to be able to control access to a lock, and give priority to certain clients
(LockClients). To use it, create a lock and clients:
```
my_lock = LockQueue(asyncio.Lock())
client_a = LockClient(0, my_lock)
client_b = LockClient(1, my_lock)
async with client_a:
...
```
The clients can be used like normal async locks, but the higher priority (lower number) will always go first.
    Must be created within a running asyncio event loop, and close() and await_closed() should be called.
"""
def __init__(self, inner_lock: asyncio.Lock):
self._inner_lock: asyncio.Lock = inner_lock
self._task_queue: asyncio.PriorityQueue = asyncio.PriorityQueue()
self._run_task = asyncio.create_task(self._run())
self._release_event = asyncio.Event()
async def put(self, priority: int, callback: Callable[[], Awaitable[object]]):
await self._task_queue.put(PrioritizedCallable(priority=priority, af=callback))
async def acquire(self):
await self._inner_lock.acquire()
def release(self):
self._inner_lock.release()
self._release_event.set()
async def _run(self):
try:
while True:
prioritized_callback = await self._task_queue.get()
self._release_event = asyncio.Event()
await self.acquire()
await prioritized_callback.af()
await self._release_event.wait()
except asyncio.CancelledError:
error_stack = traceback.format_exc()
log.debug(f"LockQueue._run() cancelled: {error_stack}")
def close(self):
self._run_task.cancel()
async def await_closed(self):
await self._run_task
class LockClient:
def __init__(self, priority: int, queue: LockQueue):
self._priority = priority
self._queue = queue
async def __aenter__(self):
called: asyncio.Event = asyncio.Event()
# Use a parameter default to avoid a closure
async def callback(called=called) -> None:
called.set()
await self._queue.put(priority=self._priority, callback=callback)
await called.wait()
async def __aexit__(self, exc_type, exc, tb):
self._queue.release()
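# Illustrative usage sketch (added commentary, not part of the original module). It relies
# only on the classes above; `_demo` is a hypothetical name. The lower-numbered client is
# always serviced first when both are waiting on the queue.
#
#   async def _demo() -> None:
#       queue = LockQueue(asyncio.Lock())
#       high_priority = LockClient(0, queue)
#       low_priority = LockClient(1, queue)
#       async with high_priority:
#           pass  # critical section for the high-priority client
#       async with low_priority:
#           pass  # runs only after the high-priority client has released
#       queue.close()
#       await queue.await_closed()
#
#   asyncio.run(_demo())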
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/full_node/lock_queue.py
from dataclasses import dataclass
from typing import List, Optional, Set
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from salvia.types.blockchain_format.program import SerializedProgram
from salvia.types.blockchain_format.reward_chain_block import RewardChainBlock
from salvia.types.blockchain_format.vdf import VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.util.ints import uint32
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class FullBlock(Streamable):
# All the information required to validate a block
finished_sub_slots: List[EndOfSubSlotBundle] # If first sb
reward_chain_block: RewardChainBlock # Reward chain trunk data
challenge_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
challenge_chain_ip_proof: VDFProof
reward_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
reward_chain_ip_proof: VDFProof
infused_challenge_chain_ip_proof: Optional[VDFProof] # Iff deficit < 4
foliage: Foliage # Reward chain foliage data
foliage_transaction_block: Optional[FoliageTransactionBlock] # Reward chain foliage data (tx block)
transactions_info: Optional[TransactionsInfo] # Reward chain foliage data (tx block additional)
transactions_generator: Optional[SerializedProgram] # Program that generates transactions
transactions_generator_ref_list: List[
uint32
] # List of block heights of previous generators referenced in this block
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def height(self):
return self.reward_chain_block.height
@property
def weight(self):
return self.reward_chain_block.weight
@property
def total_iters(self):
return self.reward_chain_block.total_iters
@property
def header_hash(self):
return self.foliage.get_hash()
def is_transaction_block(self) -> bool:
return self.foliage_transaction_block is not None
def get_included_reward_coins(self) -> Set[Coin]:
if not self.is_transaction_block():
return set()
assert self.transactions_info is not None
return set(self.transactions_info.reward_claims_incorporated)
def is_fully_compactified(self) -> bool:
for sub_slot in self.finished_sub_slots:
if (
sub_slot.proofs.challenge_chain_slot_proof.witness_type != 0
or not sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity
):
return False
if sub_slot.proofs.infused_challenge_chain_slot_proof is not None and (
sub_slot.proofs.infused_challenge_chain_slot_proof.witness_type != 0
or not sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
):
return False
if self.challenge_chain_sp_proof is not None and (
self.challenge_chain_sp_proof.witness_type != 0 or not self.challenge_chain_sp_proof.normalized_to_identity
):
return False
if self.challenge_chain_ip_proof.witness_type != 0 or not self.challenge_chain_ip_proof.normalized_to_identity:
return False
return True
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/full_block.py
from dataclasses import dataclass
from typing import List, Optional
from salvia.types.blockchain_format.foliage import Foliage, FoliageTransactionBlock, TransactionsInfo
from salvia.types.blockchain_format.reward_chain_block import RewardChainBlock
from salvia.types.blockchain_format.vdf import VDFProof
from salvia.types.end_of_slot_bundle import EndOfSubSlotBundle
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class HeaderBlock(Streamable):
# Same as a FullBlock but without TransactionInfo and Generator (but with filter), used by light clients
finished_sub_slots: List[EndOfSubSlotBundle] # If first sb
reward_chain_block: RewardChainBlock # Reward chain trunk data
challenge_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
challenge_chain_ip_proof: VDFProof
reward_chain_sp_proof: Optional[VDFProof] # If not first sp in sub-slot
reward_chain_ip_proof: VDFProof
infused_challenge_chain_ip_proof: Optional[VDFProof] # Iff deficit < 4
foliage: Foliage # Reward chain foliage data
foliage_transaction_block: Optional[FoliageTransactionBlock] # Reward chain foliage data (tx block)
transactions_filter: bytes # Filter for block transactions
transactions_info: Optional[TransactionsInfo] # Reward chain foliage data (tx block additional)
@property
def prev_header_hash(self):
return self.foliage.prev_block_hash
@property
def prev_hash(self):
return self.foliage.prev_block_hash
@property
def height(self):
return self.reward_chain_block.height
@property
def weight(self):
return self.reward_chain_block.weight
@property
def header_hash(self):
return self.foliage.get_hash()
@property
def total_iters(self):
return self.reward_chain_block.total_iters
@property
def log_string(self):
return "block " + str(self.header_hash) + " sb_height " + str(self.height) + " "
@property
def is_transaction_block(self) -> bool:
return self.reward_chain_block.is_transaction_block
@property
def first_in_sub_slot(self) -> bool:
return self.finished_sub_slots is not None and len(self.finished_sub_slots) > 0
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/header_block.py
import logging
from dataclasses import dataclass
from typing import Optional
from bitstring import BitArray
from blspy import G1Element, AugSchemeMPL, PrivateKey
from chiapos import Verifier
from salvia.consensus.constants import ConsensusConstants
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.hash import std_hash
from salvia.util.ints import uint8
from salvia.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class ProofOfSpace(Streamable):
challenge: bytes32
pool_public_key: Optional[G1Element] # Only one of these two should be present
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
size: uint8
proof: bytes
def get_plot_id(self) -> bytes32:
assert self.pool_public_key is None or self.pool_contract_puzzle_hash is None
if self.pool_public_key is None:
return self.calculate_plot_id_ph(self.pool_contract_puzzle_hash, self.plot_public_key)
return self.calculate_plot_id_pk(self.pool_public_key, self.plot_public_key)
def verify_and_get_quality_string(
self,
constants: ConsensusConstants,
original_challenge_hash: bytes32,
signage_point: bytes32,
) -> Optional[bytes32]:
# Exactly one of (pool_public_key, pool_contract_puzzle_hash) must not be None
if (self.pool_public_key is None) and (self.pool_contract_puzzle_hash is None):
log.error("Fail 1")
return None
if (self.pool_public_key is not None) and (self.pool_contract_puzzle_hash is not None):
log.error("Fail 2")
return None
if self.size < constants.MIN_PLOT_SIZE:
log.error("Fail 3")
return None
if self.size > constants.MAX_PLOT_SIZE:
log.error("Fail 4")
return None
plot_id: bytes32 = self.get_plot_id()
new_challenge: bytes32 = ProofOfSpace.calculate_pos_challenge(plot_id, original_challenge_hash, signage_point)
if new_challenge != self.challenge:
log.error("New challenge is not challenge")
return None
if not ProofOfSpace.passes_plot_filter(constants, plot_id, original_challenge_hash, signage_point):
log.error("Fail 5")
return None
return self.get_quality_string(plot_id)
def get_quality_string(self, plot_id: bytes32) -> Optional[bytes32]:
quality_str = Verifier().validate_proof(plot_id, self.size, self.challenge, bytes(self.proof))
if not quality_str:
return None
return bytes32(quality_str)
@staticmethod
def passes_plot_filter(
constants: ConsensusConstants,
plot_id: bytes32,
challenge_hash: bytes32,
signage_point: bytes32,
) -> bool:
plot_filter: BitArray = BitArray(
ProofOfSpace.calculate_plot_filter_input(plot_id, challenge_hash, signage_point)
)
return plot_filter[: constants.NUMBER_ZERO_BITS_PLOT_FILTER].uint == 0
@staticmethod
def calculate_plot_filter_input(plot_id: bytes32, challenge_hash: bytes32, signage_point: bytes32) -> bytes32:
return std_hash(plot_id + challenge_hash + signage_point)
@staticmethod
def calculate_pos_challenge(plot_id: bytes32, challenge_hash: bytes32, signage_point: bytes32) -> bytes32:
return std_hash(ProofOfSpace.calculate_plot_filter_input(plot_id, challenge_hash, signage_point))
@staticmethod
def calculate_plot_id_pk(
pool_public_key: G1Element,
plot_public_key: G1Element,
) -> bytes32:
return std_hash(bytes(pool_public_key) + bytes(plot_public_key))
@staticmethod
def calculate_plot_id_ph(
pool_contract_puzzle_hash: bytes32,
plot_public_key: G1Element,
) -> bytes32:
return std_hash(bytes(pool_contract_puzzle_hash) + bytes(plot_public_key))
@staticmethod
def generate_taproot_sk(local_pk: G1Element, farmer_pk: G1Element) -> PrivateKey:
taproot_message: bytes = bytes(local_pk + farmer_pk) + bytes(local_pk) + bytes(farmer_pk)
taproot_hash: bytes32 = std_hash(taproot_message)
return AugSchemeMPL.key_gen(taproot_hash)
@staticmethod
def generate_plot_public_key(local_pk: G1Element, farmer_pk: G1Element, include_taproot: bool = False) -> G1Element:
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(local_pk, farmer_pk)
return local_pk + farmer_pk + taproot_sk.get_g1()
else:
return local_pk + farmer_pk
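# Illustrative sketch (added commentary, not part of the original module). `constants`,
# `pool_pk`, `plot_pk`, `challenge_hash` and `sp_hash` are hypothetical caller-supplied
# values; the sketch only restates the filter rule implemented above.
#
#   plot_id = ProofOfSpace.calculate_plot_id_pk(pool_pk, plot_pk)
#   eligible = ProofOfSpace.passes_plot_filter(constants, plot_id, challenge_hash, sp_hash)
#   # `eligible` is True only when the first NUMBER_ZERO_BITS_PLOT_FILTER bits of
#   # sha256(plot_id + challenge_hash + sp_hash) are all zero.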
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/blockchain_format/proof_of_space.py
import io
from typing import List, Set, Tuple, Optional, Any
from clvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM, SExp
from clvm import run_program as default_run_program
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.operators import OP_REWRITE, OPERATOR_LOOKUP
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm_rs import STRICT_MODE, deserialize_and_run_program2, serialized_length, run_generator
from clvm_tools.curry import curry, uncurry
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.hash import std_hash
from salvia.util.byte_types import hexstr_to_bytes
from .tree_hash import sha256_treehash
def run_program(
program,
args,
max_cost,
operator_lookup=OPERATOR_LOOKUP,
pre_eval_f=None,
):
return default_run_program(
program,
args,
operator_lookup,
max_cost,
pre_eval_f=pre_eval_f,
)
INFINITE_COST = 0x7FFFFFFFFFFFFFFF
class Program(SExp):
"""
A thin wrapper around s-expression data intended to be invoked with "eval".
"""
@classmethod
def parse(cls, f) -> "Program":
return sexp_from_stream(f, cls.to)
def stream(self, f):
sexp_to_stream(self, f)
@classmethod
def from_bytes(cls, blob: bytes) -> "Program":
f = io.BytesIO(blob)
result = cls.parse(f) # type: ignore # noqa
assert f.read() == b""
return result
@classmethod
def fromhex(cls, hexstr: str) -> "Program":
return cls.from_bytes(hexstr_to_bytes(hexstr))
def to_serialized_program(self) -> "SerializedProgram":
return SerializedProgram.from_bytes(bytes(self))
@classmethod
def from_serialized_program(cls, sp: "SerializedProgram") -> "Program":
return cls.from_bytes(bytes(sp))
def __bytes__(self) -> bytes:
f = io.BytesIO()
self.stream(f) # type: ignore # noqa
return f.getvalue()
def __str__(self) -> str:
return bytes(self).hex()
def at(self, position: str) -> "Program":
"""
Take a string of only `f` and `r` characters and follow the corresponding path.
Example:
`assert Program.to(17) == Program.to([10, 20, 30, [15, 17], 40, 50]).at("rrrfrf")`
"""
v = self
for c in position.lower():
if c == "f":
v = v.first()
elif c == "r":
v = v.rest()
else:
raise ValueError(f"`at` got illegal character `{c}`. Only `f` & `r` allowed")
return v
def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
"""
Any values in `args` that appear in the tree
are presumed to have been hashed already.
"""
return sha256_treehash(self, set(args))
def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
prog_args = Program.to(args)
cost, r = run_program(self, prog_args, max_cost)
return cost, Program.to(r)
def run(self, args) -> "Program":
cost, r = self.run_with_cost(INFINITE_COST, args)
return r
def curry(self, *args) -> "Program":
cost, r = curry(self, list(args))
return Program.to(r)
def uncurry(self) -> Tuple["Program", "Program"]:
r = uncurry(self)
if r is None:
return self, self.to(0)
return r
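    # Illustrative note (added commentary, not part of the original class): curry and
    # uncurry are expected to round-trip, i.e. for a program `f` and an argument `x`,
    # `f.curry(x).uncurry()` should give back `(f, Program.to([x]))`. This is a hedged
    # expectation based on the clvm_tools curry/uncurry pair used above, not a new API.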
def as_int(self) -> int:
return int_from_bytes(self.as_atom())
def as_atom_list(self) -> List[bytes]:
"""
Pretend `self` is a list of atoms. Return the corresponding
python list of atoms.
At each step, we always assume a node to be an atom or a pair.
If the assumption is wrong, we exit early. This way we never fail
and always return SOMETHING.
"""
items = []
obj = self
while True:
pair = obj.pair
if pair is None:
break
atom = pair[0].atom
if atom is None:
break
items.append(atom)
obj = pair[1]
return items
def __deepcopy__(self, memo):
return type(self).from_bytes(bytes(self))
EvalError = EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
"""
Hash values in `precalculated` are presumed to have been hashed already.
"""
if node.listp():
left = _tree_hash(node.first(), precalculated)
right = _tree_hash(node.rest(), precalculated)
s = b"\2" + left + right
else:
atom = node.as_atom()
if atom in precalculated:
return bytes32(atom)
s = b"\1" + atom
return bytes32(std_hash(s))
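# Worked example (added commentary, not part of the original module), following the
# _tree_hash rules above: an atom hashes as sha256(0x01 || atom) and a pair hashes as
# sha256(0x02 || left_hash || right_hash). For instance:
#
#   leaf = std_hash(b"\x01" + b"foo")      # tree hash of the atom "foo"
#   nil = std_hash(b"\x01" + b"")          # tree hash of nil (the empty atom)
#   pair = std_hash(b"\x02" + leaf + nil)  # tree hash of the list ("foo")
#   assert Program.to([b"foo"]).get_tree_hash() == pair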
def _serialize(node) -> bytes:
if type(node) == SerializedProgram:
return bytes(node)
else:
return SExp.to(node).as_bin()
class SerializedProgram:
"""
An opaque representation of a clvm program. It has a more limited interface than a full SExp
"""
_buf: bytes = b""
@classmethod
def parse(cls, f) -> "SerializedProgram":
length = serialized_length(f.getvalue()[f.tell() :])
return SerializedProgram.from_bytes(f.read(length))
def stream(self, f):
f.write(self._buf)
@classmethod
def from_bytes(cls, blob: bytes) -> "SerializedProgram":
ret = SerializedProgram()
ret._buf = bytes(blob)
return ret
@classmethod
def fromhex(cls, hexstr: str) -> "SerializedProgram":
return cls.from_bytes(hexstr_to_bytes(hexstr))
@classmethod
def from_program(cls, p: Program) -> "SerializedProgram":
ret = SerializedProgram()
ret._buf = bytes(p)
return ret
def to_program(self) -> Program:
return Program.from_bytes(self._buf)
def uncurry(self) -> Tuple["Program", "Program"]:
return self.to_program().uncurry()
def __bytes__(self) -> bytes:
return self._buf
def __str__(self) -> str:
return bytes(self).hex()
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, str(self))
def __eq__(self, other) -> bool:
if not isinstance(other, SerializedProgram):
return False
return self._buf == other._buf
def __ne__(self, other) -> bool:
if not isinstance(other, SerializedProgram):
return True
return self._buf != other._buf
def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
"""
Any values in `args` that appear in the tree
are presumed to have been hashed already.
"""
tmp = sexp_from_stream(io.BytesIO(self._buf), SExp.to)
return _tree_hash(tmp, set(args))
def run_safe_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
return self._run(max_cost, STRICT_MODE, *args)
def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
return self._run(max_cost, 0, *args)
def run_as_generator(self, max_cost: int, flags: int, *args) -> Tuple[Optional[int], List[Any], int]:
serialized_args = b""
if len(args) > 1:
# when we have more than one argument, serialize them into a list
for a in args:
serialized_args += b"\xff"
serialized_args += _serialize(a)
serialized_args += b"\x80"
else:
serialized_args += _serialize(args[0])
native_opcode_names_by_opcode = dict(
("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
)
err, npc_list, cost = run_generator(
self._buf,
serialized_args,
KEYWORD_TO_ATOM["q"][0],
KEYWORD_TO_ATOM["a"][0],
native_opcode_names_by_opcode,
max_cost,
flags,
)
return None if err == 0 else err, npc_list, cost
def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
# when multiple arguments are passed, concatenate them into a serialized
# buffer. Some arguments may already be in serialized form (e.g.
# SerializedProgram) so we don't want to de-serialize those just to
# serialize them back again. This is handled by _serialize()
serialized_args = b""
if len(args) > 1:
# when we have more than one argument, serialize them into a list
for a in args:
serialized_args += b"\xff"
serialized_args += _serialize(a)
serialized_args += b"\x80"
else:
serialized_args += _serialize(args[0])
# TODO: move this ugly magic into `clvm` "dialects"
native_opcode_names_by_opcode = dict(
("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
)
cost, ret = deserialize_and_run_program2(
self._buf,
serialized_args,
KEYWORD_TO_ATOM["q"][0],
KEYWORD_TO_ATOM["a"][0],
native_opcode_names_by_opcode,
max_cost,
flags,
)
return cost, Program.to(ret)
NIL = Program.from_bytes(b"\x80")
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/blockchain_format/program.py
from dataclasses import dataclass
from typing import Any, List
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.clvm import int_to_bytes
from salvia.util.hash import std_hash
from salvia.util.ints import uint64
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class Coin(Streamable):
"""
This structure is used in the body for the reward and fees genesis coins.
"""
parent_coin_info: bytes32 # down with this sort of thing.
puzzle_hash: bytes32
amount: uint64
def get_hash(self) -> bytes32:
# This does not use streamable format for hashing, the amount is
# serialized using CLVM integer format.
# Note that int_to_bytes() will prepend a 0 to integers where the most
# significant bit is set, to encode it as a positive number. This
# despite "amount" being unsigned. This way, a CLVM program can generate
# these hashes easily.
return std_hash(self.parent_coin_info + self.puzzle_hash + int_to_bytes(self.amount))
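    # Worked example (added commentary, not part of the original class): with the CLVM
    # integer encoding described above, int_to_bytes(255) == b"\x00\xff" (a leading zero
    # byte keeps the value positive) while int_to_bytes(127) == b"\x7f", so a coin with
    # amount 255 is hashed as std_hash(parent_coin_info + puzzle_hash + b"\x00\xff").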
def name(self) -> bytes32:
return self.get_hash()
def as_list(self) -> List[Any]:
return [self.parent_coin_info, self.puzzle_hash, self.amount]
@property
def name_str(self) -> str:
return self.name().hex()
@classmethod
def from_bytes(cls, blob):
# this function is never called. We rely on the standard streamable
# protocol for both serialization and parsing of Coin.
# using this function may be ambiguous the same way __bytes__() is.
assert False
def __bytes__(self) -> bytes: # pylint: disable=E0308
# this function is never called and calling it would be ambiguous. Do
# you want the format that's hashed or the format that's serialized?
assert False
def hash_coin_list(coin_list: List[Coin]) -> bytes32:
coin_list.sort(key=lambda x: x.name_str, reverse=True)
buffer = bytearray()
for coin in coin_list:
buffer.extend(coin.name())
return std_hash(buffer)
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/blockchain_format/coin.py
import logging
import traceback
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional
from functools import lru_cache
from chiavdf import create_discriminant, verify_n_wesolowski
from salvia.consensus.constants import ConsensusConstants
from salvia.types.blockchain_format.classgroup import ClassgroupElement
from salvia.types.blockchain_format.sized_bytes import bytes32, bytes100
from salvia.util.ints import uint8, uint64
from salvia.util.streamable import Streamable, streamable
log = logging.getLogger(__name__)
@lru_cache(maxsize=200)
def get_discriminant(challenge, size_bits) -> int:
    return int(
        create_discriminant(challenge, size_bits),
16,
)
@lru_cache(maxsize=1000)
def verify_vdf(
disc: int,
input_el: bytes100,
output: bytes,
number_of_iterations: uint64,
discriminant_size: int,
witness_type: uint8,
):
return verify_n_wesolowski(
str(disc),
input_el,
output,
number_of_iterations,
discriminant_size,
witness_type,
)
@dataclass(frozen=True)
@streamable
class VDFInfo(Streamable):
challenge: bytes32 # Used to generate the discriminant (VDF group)
number_of_iterations: uint64
output: ClassgroupElement
@dataclass(frozen=True)
@streamable
class VDFProof(Streamable):
witness_type: uint8
witness: bytes
normalized_to_identity: bool
def is_valid(
self,
constants: ConsensusConstants,
input_el: ClassgroupElement,
info: VDFInfo,
target_vdf_info: Optional[VDFInfo] = None,
) -> bool:
"""
If target_vdf_info is passed in, it is compared with info.
"""
if target_vdf_info is not None and info != target_vdf_info:
tb = traceback.format_stack()
log.error(f"{tb} INVALID VDF INFO. Have: {info} Expected: {target_vdf_info}")
return False
if self.witness_type + 1 > constants.MAX_VDF_WITNESS_SIZE:
return False
try:
disc: int = get_discriminant(info.challenge, constants.DISCRIMINANT_SIZE_BITS)
            # TODO: parallelize somehow, this might include multiple mini proofs (n weso)
return verify_vdf(
disc,
input_el.data,
info.output.data + bytes(self.witness),
info.number_of_iterations,
constants.DISCRIMINANT_SIZE_BITS,
self.witness_type,
)
except Exception:
return False
# Stores, for a given VDF, the field that uses it.
class CompressibleVDFField(IntEnum):
CC_EOS_VDF = 1
ICC_EOS_VDF = 2
CC_SP_VDF = 3
CC_IP_VDF = 4
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/blockchain_format/vdf.py
from dataclasses import dataclass
from typing import List, Optional
from blspy import G2Element
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.pool_target import PoolTarget
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.ints import uint64
from salvia.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class TransactionsInfo(Streamable):
# Information that goes along with each transaction block
generator_root: bytes32 # sha256 of the block generator in this block
generator_refs_root: bytes32 # sha256 of the concatenation of the generator ref list entries
aggregated_signature: G2Element
fees: uint64 # This only includes user fees, not block rewards
cost: uint64 # This is the total cost of this block, including CLVM cost, cost of program size and conditions
reward_claims_incorporated: List[Coin] # These can be in any order
@dataclass(frozen=True)
@streamable
class FoliageTransactionBlock(Streamable):
# Information that goes along with each transaction block that is relevant for light clients
prev_transaction_block_hash: bytes32
timestamp: uint64
filter_hash: bytes32
additions_root: bytes32
removals_root: bytes32
transactions_info_hash: bytes32
@dataclass(frozen=True)
@streamable
class FoliageBlockData(Streamable):
# Part of the block that is signed by the plot key
unfinished_reward_block_hash: bytes32
pool_target: PoolTarget
pool_signature: Optional[G2Element] # Iff ProofOfSpace has a pool pk
farmer_reward_puzzle_hash: bytes32
extension_data: bytes32 # Used for future updates. Can be any 32 byte value initially
@dataclass(frozen=True)
@streamable
class Foliage(Streamable):
# The entire foliage block, containing signature and the unsigned back pointer
# The hash of this is the "header hash". Note that for unfinished blocks, the prev_block_hash
    # is the prev from the signage point, and can be replaced with a more recent block
prev_block_hash: bytes32
reward_block_hash: bytes32
foliage_block_data: FoliageBlockData
foliage_block_data_signature: G2Element
foliage_transaction_block_hash: Optional[bytes32]
foliage_transaction_block_signature: Optional[G2Element]
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/types/blockchain_format/foliage.py
import functools
from typing import List, Optional
from blspy import AugSchemeMPL, G1Element, G2Element, GTElement
from salvia.types.blockchain_format.sized_bytes import bytes48
from salvia.util.hash import std_hash
from salvia.util.lru_cache import LRUCache
def get_pairings(cache: LRUCache, pks: List[bytes48], msgs: List[bytes], force_cache: bool) -> List[GTElement]:
pairings: List[Optional[GTElement]] = []
missing_count: int = 0
for pk, msg in zip(pks, msgs):
aug_msg: bytes = pk + msg
h: bytes = bytes(std_hash(aug_msg))
pairing: Optional[GTElement] = cache.get(h)
if not force_cache and pairing is None:
missing_count += 1
# Heuristic to avoid more expensive sig validation with pairing
# cache when it's empty and cached pairings won't be useful later
# (e.g. while syncing)
if missing_count > len(pks) // 2:
return []
pairings.append(pairing)
for i, pairing in enumerate(pairings):
if pairing is None:
aug_msg = pks[i] + msgs[i]
aug_hash: G2Element = AugSchemeMPL.g2_from_message(aug_msg)
pairing = G1Element.from_bytes(pks[i]).pair(aug_hash)
h = bytes(std_hash(aug_msg))
cache.put(h, pairing)
pairings[i] = pairing
return pairings
# Increasing this number will increase RAM usage, but decrease BLS validation time for blocks and unfinished blocks.
LOCAL_CACHE: LRUCache = LRUCache(50000)
def aggregate_verify(
pks: List[bytes48], msgs: List[bytes], sig: G2Element, force_cache: bool = False, cache: LRUCache = LOCAL_CACHE
):
pairings: List[GTElement] = get_pairings(cache, pks, msgs, force_cache)
if len(pairings) == 0:
pks_objects: List[G1Element] = [G1Element.from_bytes(pk) for pk in pks]
return AugSchemeMPL.aggregate_verify(pks_objects, msgs, sig)
pairings_prod: GTElement = functools.reduce(GTElement.__mul__, pairings)
return pairings_prod == sig.pair(G1Element.generator())
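# Illustrative usage sketch (added commentary, not part of the original module). The seed
# below is a hypothetical placeholder; any 32-byte seed works with AugSchemeMPL.key_gen.
#
#   sk = AugSchemeMPL.key_gen(bytes([1] * 32))
#   pk = sk.get_g1()
#   msg = b"example message"
#   sig = AugSchemeMPL.sign(sk, msg)
#   assert aggregate_verify([bytes48(bytes(pk))], [msg], sig, force_cache=True)
#   # force_cache=True computes and stores the pairing, so a later call with the same
#   # (pk, msg) pair can be answered from LOCAL_CACHE.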
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/cached_bls.py
from typing import List, Tuple
from chiabip158 import PyBIP158
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.full_block import FullBlock
from salvia.types.header_block import HeaderBlock
from salvia.types.name_puzzle_condition import NPC
from salvia.util.condition_tools import created_outputs_for_conditions_dict
def get_block_header(block: FullBlock, tx_addition_coins: List[Coin], removals_names: List[bytes32]) -> HeaderBlock:
# Create filter
byte_array_tx: List[bytes32] = []
addition_coins = tx_addition_coins + list(block.get_included_reward_coins())
if block.is_transaction_block():
for coin in addition_coins:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for name in removals_names:
byte_array_tx.append(bytearray(name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter: bytes = bytes(bip158.GetEncoded())
return HeaderBlock(
block.finished_sub_slots,
block.reward_chain_block,
block.challenge_chain_sp_proof,
block.challenge_chain_ip_proof,
block.reward_chain_sp_proof,
block.reward_chain_ip_proof,
block.infused_challenge_chain_ip_proof,
block.foliage,
block.foliage_transaction_block,
encoded_filter,
block.transactions_info,
)
def additions_for_npc(npc_list: List[NPC]) -> List[Coin]:
additions: List[Coin] = []
for npc in npc_list:
for coin in created_outputs_for_conditions_dict(npc.condition_dict, npc.coin_name):
additions.append(coin)
return additions
def tx_removals_and_additions(npc_list: List[NPC]) -> Tuple[List[bytes32], List[Coin]]:
"""
Doesn't return farmer and pool reward.
"""
removals: List[bytes32] = []
additions: List[Coin] = []
# build removals list
if npc_list is None:
return [], []
for npc in npc_list:
removals.append(npc.coin_name)
additions.extend(additions_for_npc(npc_list))
return removals, additions
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/generator_tools.py
from __future__ import annotations
import dataclasses
import io
import pprint
import sys
from enum import Enum
from typing import Any, BinaryIO, Dict, List, Tuple, Type, Callable, Optional, Iterator
from blspy import G1Element, G2Element, PrivateKey
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.hash import std_hash
from salvia.util.ints import int64, int512, uint32, uint64, uint128
from salvia.util.type_checking import is_type_List, is_type_SpecificOptional, is_type_Tuple, strictdataclass
if sys.version_info < (3, 8):
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
return getattr(t, "__args__", ())
else:
from typing import get_args
pp = pprint.PrettyPrinter(indent=1, width=120, compact=True)
# TODO: Remove hack, this allows streaming these objects from binary
size_hints = {
"PrivateKey": PrivateKey.PRIVATE_KEY_SIZE,
"G1Element": G1Element.SIZE,
"G2Element": G2Element.SIZE,
"ConditionOpcode": 1,
}
unhashable_types = [
"PrivateKey",
"G1Element",
"G2Element",
"Program",
"SerializedProgram",
]
# JSON does not support big ints, so these types must be serialized differently in JSON
big_ints = [uint64, int64, uint128, int512]
def dataclass_from_dict(klass, d):
"""
Converts a dictionary based on a dataclass, into an instance of that dataclass.
Recursively goes through lists, optionals, and dictionaries.
"""
if is_type_SpecificOptional(klass):
# Type is optional, data is either None, or Any
if not d:
return None
return dataclass_from_dict(get_args(klass)[0], d)
elif is_type_Tuple(klass):
# Type is tuple, can have multiple different types inside
i = 0
klass_properties = []
for item in d:
klass_properties.append(dataclass_from_dict(klass.__args__[i], item))
i = i + 1
return tuple(klass_properties)
elif dataclasses.is_dataclass(klass):
# Type is a dataclass, data is a dictionary
fieldtypes = {f.name: f.type for f in dataclasses.fields(klass)}
return klass(**{f: dataclass_from_dict(fieldtypes[f], d[f]) for f in d})
elif is_type_List(klass):
# Type is a list, data is a list
return [dataclass_from_dict(get_args(klass)[0], item) for item in d]
elif issubclass(klass, bytes):
# Type is bytes, data is a hex string
return klass(hexstr_to_bytes(d))
elif klass.__name__ in unhashable_types:
# Type is unhashable (bls type), so cast from hex string
return klass.from_bytes(hexstr_to_bytes(d))
else:
# Type is a primitive, cast with correct class
return klass(d)
def recurse_jsonify(d):
"""
Makes bytes objects and unhashable types into strings with 0x, and makes large ints into
strings.
"""
if isinstance(d, list) or isinstance(d, tuple):
new_list = []
for item in d:
if type(item).__name__ in unhashable_types or issubclass(type(item), bytes):
item = f"0x{bytes(item).hex()}"
if isinstance(item, dict):
item = recurse_jsonify(item)
if isinstance(item, list):
item = recurse_jsonify(item)
if isinstance(item, tuple):
item = recurse_jsonify(item)
if isinstance(item, Enum):
item = item.name
if isinstance(item, int) and type(item) in big_ints:
item = int(item)
new_list.append(item)
d = new_list
else:
for key, value in d.items():
if type(value).__name__ in unhashable_types or issubclass(type(value), bytes):
d[key] = f"0x{bytes(value).hex()}"
if isinstance(value, dict):
d[key] = recurse_jsonify(value)
if isinstance(value, list):
d[key] = recurse_jsonify(value)
if isinstance(value, tuple):
d[key] = recurse_jsonify(value)
if isinstance(value, Enum):
d[key] = value.name
if isinstance(value, int) and type(value) in big_ints:
d[key] = int(value)
return d
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS = {}
def streamable(cls: Any):
"""
This is a decorator for class definitions. It applies the strictdataclass decorator,
which checks all types at construction. It also defines a simple serialization format,
and adds parse, from bytes, stream, and __bytes__ methods.
The primitives are:
* Sized ints serialized in big endian format, e.g. uint64
* Sized bytes serialized in big endian format, e.g. bytes32
* BLS public keys serialized in bls format (48 bytes)
* BLS signatures serialized in bls format (96 bytes)
* bool serialized into 1 byte (0x01 or 0x00)
* bytes serialized as a 4 byte size prefix and then the bytes.
* ConditionOpcode is serialized as a 1 byte value.
* str serialized as a 4 byte size prefix and then the utf-8 representation in bytes.
An item is one of:
* primitive
* Tuple[item1, .. itemx]
* List[item1, .. itemx]
* Optional[item]
* Custom item
A streamable must be a Tuple at the root level (although a dataclass is used here instead).
Iters are serialized in the following way:
1. A tuple of x items is serialized by appending the serialization of each item.
2. A List is serialized into a 4 byte size prefix (number of items) and the serialization of each item.
3. An Optional is serialized into a 1 byte prefix of 0x00 or 0x01, and if it's one, it's followed by the serialization of the item.
4. A Custom item is serialized by calling the .parse method, passing in the stream of bytes into it. An example is a CLVM program.
All of the constituents must have parse/from_bytes, and stream/__bytes__ and therefore
be of fixed size. For example, int cannot be a constituent since it is not a fixed size,
whereas uint32 can be.
Furthermore, a get_hash() member is added, which performs a serialization and a sha256.
This class is used for deterministic serialization and hashing, for consensus critical
objects such as the block header.
Make sure to use the Streamable class as a parent class when using the streamable decorator,
as it will allow linters to recognize the methods that are added by the decorator. Also,
use the @dataclass(frozen=True) decorator as well, for linters to recognize constructor
arguments.
"""
cls1 = strictdataclass(cls)
t = type(cls.__name__, (cls1, Streamable), {})
parse_functions = []
try:
fields = cls1.__annotations__ # pylint: disable=no-member
except Exception:
fields = {}
for _, f_type in fields.items():
parse_functions.append(cls.function_to_parse_one_item(f_type))
PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[t] = parse_functions
return t
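# Worked example (added commentary, not part of the original module) of the wire format
# described in the docstring above; the field values are hypothetical:
#
#   uint32(7)                 ->  00 00 00 07        (4-byte big endian)
#   bool True                 ->  01                 (single byte)
#   Optional[uint32] = None   ->  00                 (absent marker only)
#   Optional[uint32] = 7      ->  01 00 00 00 07     (present marker + value)
#   bytes b"ab"               ->  00 00 00 02 61 62  (4-byte length prefix + data)
#   List[bool] [True, False]  ->  00 00 00 02 01 00  (4-byte count prefix + items)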
def parse_bool(f: BinaryIO) -> bool:
bool_byte = f.read(1)
assert bool_byte is not None and len(bool_byte) == 1 # Checks for EOF
if bool_byte == bytes([0]):
return False
elif bool_byte == bytes([1]):
return True
else:
raise ValueError("Bool byte must be 0 or 1")
def parse_uint32(f: BinaryIO, byteorder: str = "big") -> uint32:
size_bytes = f.read(4)
assert size_bytes is not None and len(size_bytes) == 4 # Checks for EOF
return uint32(int.from_bytes(size_bytes, byteorder))
def write_uint32(f: BinaryIO, value: uint32, byteorder: str = "big"):
f.write(value.to_bytes(4, byteorder))
def parse_optional(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> Optional[Any]:
is_present_bytes = f.read(1)
assert is_present_bytes is not None and len(is_present_bytes) == 1 # Checks for EOF
if is_present_bytes == bytes([0]):
return None
elif is_present_bytes == bytes([1]):
return parse_inner_type_f(f)
else:
raise ValueError("Optional must be 0 or 1")
def parse_bytes(f: BinaryIO) -> bytes:
list_size = parse_uint32(f)
bytes_read = f.read(list_size)
assert bytes_read is not None and len(bytes_read) == list_size
return bytes_read
def parse_list(f: BinaryIO, parse_inner_type_f: Callable[[BinaryIO], Any]) -> List[Any]:
full_list: List = []
# wjb assert inner_type != get_args(List)[0]
list_size = parse_uint32(f)
for list_index in range(list_size):
full_list.append(parse_inner_type_f(f))
return full_list
def parse_tuple(f: BinaryIO, list_parse_inner_type_f: List[Callable[[BinaryIO], Any]]) -> Tuple[Any, ...]:
full_list = []
for parse_f in list_parse_inner_type_f:
full_list.append(parse_f(f))
return tuple(full_list)
def parse_size_hints(f: BinaryIO, f_type: Type, bytes_to_read: int) -> Any:
bytes_read = f.read(bytes_to_read)
assert bytes_read is not None and len(bytes_read) == bytes_to_read
return f_type.from_bytes(bytes_read)
def parse_str(f: BinaryIO) -> str:
str_size = parse_uint32(f)
str_read_bytes = f.read(str_size)
assert str_read_bytes is not None and len(str_read_bytes) == str_size # Checks for EOF
return bytes.decode(str_read_bytes, "utf-8")
class Streamable:
@classmethod
def function_to_parse_one_item(cls: Type[cls.__name__], f_type: Type): # type: ignore
"""
This function returns a function taking one argument `f: BinaryIO` that parses
and returns a value of the given type.
"""
inner_type: Type
if f_type is bool:
return parse_bool
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_optional(f, parse_inner_type_f)
if hasattr(f_type, "parse"):
return f_type.parse
if f_type == bytes:
return parse_bytes
if is_type_List(f_type):
inner_type = get_args(f_type)[0]
parse_inner_type_f = cls.function_to_parse_one_item(inner_type)
return lambda f: parse_list(f, parse_inner_type_f)
if is_type_Tuple(f_type):
inner_types = get_args(f_type)
list_parse_inner_type_f = [cls.function_to_parse_one_item(_) for _ in inner_types]
return lambda f: parse_tuple(f, list_parse_inner_type_f)
if hasattr(f_type, "from_bytes") and f_type.__name__ in size_hints:
bytes_to_read = size_hints[f_type.__name__]
return lambda f: parse_size_hints(f, f_type, bytes_to_read)
if f_type is str:
return parse_str
raise NotImplementedError(f"Type {f_type} does not have parse")
@classmethod
def parse(cls: Type[cls.__name__], f: BinaryIO) -> cls.__name__: # type: ignore
# Create the object without calling __init__() to avoid unnecessary post-init checks in strictdataclass
obj: Streamable = object.__new__(cls)
fields: Iterator[str] = iter(getattr(cls, "__annotations__", {}))
values: Iterator = (parse_f(f) for parse_f in PARSE_FUNCTIONS_FOR_STREAMABLE_CLASS[cls])
for field, value in zip(fields, values):
object.__setattr__(obj, field, value)
# Use -1 as a sentinel value as it's not currently serializable
if next(fields, -1) != -1:
raise ValueError("Failed to parse incomplete Streamable object")
if next(values, -1) != -1:
raise ValueError("Failed to parse unknown data in Streamable object")
return obj
def stream_one_item(self, f_type: Type, item, f: BinaryIO) -> None:
inner_type: Type
if is_type_SpecificOptional(f_type):
inner_type = get_args(f_type)[0]
if item is None:
f.write(bytes([0]))
else:
f.write(bytes([1]))
self.stream_one_item(inner_type, item, f)
elif f_type == bytes:
write_uint32(f, uint32(len(item)))
f.write(item)
elif hasattr(f_type, "stream"):
item.stream(f)
elif hasattr(f_type, "__bytes__"):
f.write(bytes(item))
elif is_type_List(f_type):
assert is_type_List(type(item))
write_uint32(f, uint32(len(item)))
inner_type = get_args(f_type)[0]
# wjb assert inner_type != get_args(List)[0] # type: ignore
for element in item:
self.stream_one_item(inner_type, element, f)
elif is_type_Tuple(f_type):
inner_types = get_args(f_type)
assert len(item) == len(inner_types)
for i in range(len(item)):
self.stream_one_item(inner_types[i], item[i], f)
elif f_type is str:
str_bytes = item.encode("utf-8")
write_uint32(f, uint32(len(str_bytes)))
f.write(str_bytes)
elif f_type is bool:
f.write(int(item).to_bytes(1, "big"))
else:
raise NotImplementedError(f"can't stream {item}, {f_type}")
def stream(self, f: BinaryIO) -> None:
try:
fields = self.__annotations__ # pylint: disable=no-member
except Exception:
fields = {}
for f_name, f_type in fields.items():
self.stream_one_item(f_type, getattr(self, f_name), f)
def get_hash(self) -> bytes32:
return bytes32(std_hash(bytes(self)))
@classmethod
def from_bytes(cls: Any, blob: bytes) -> Any:
f = io.BytesIO(blob)
parsed = cls.parse(f)
assert f.read() == b""
return parsed
def __bytes__(self: Any) -> bytes:
f = io.BytesIO()
self.stream(f)
return bytes(f.getvalue())
def __str__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def __repr__(self: Any) -> str:
return pp.pformat(recurse_jsonify(dataclasses.asdict(self)))
def to_json_dict(self) -> Dict:
return recurse_jsonify(dataclasses.asdict(self))
@classmethod
def from_json_dict(cls: Any, json_dict: Dict) -> Any:
return dataclass_from_dict(cls, json_dict)
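# Illustrative round-trip sketch (added commentary, not part of the original module). `Demo`
# is a hypothetical streamable class a caller might define on top of this module:
#
#   @dataclasses.dataclass(frozen=True)
#   @streamable
#   class Demo(Streamable):
#       height: uint32
#       coinbase: Optional[bytes32]
#
#   d = Demo(uint32(5), None)
#   assert Demo.from_bytes(bytes(d)) == d
#   assert Demo.from_json_dict(d.to_json_dict()) == d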
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/streamable.py
from typing import Dict, List, Optional, Tuple, Set
from salvia.types.announcement import Announcement
from salvia.types.name_puzzle_condition import NPC
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program, SerializedProgram
from salvia.types.blockchain_format.sized_bytes import bytes32, bytes48
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.condition_with_args import ConditionWithArgs
from salvia.util.clvm import int_from_bytes
from salvia.util.errors import ConsensusError, Err
from salvia.util.ints import uint64
# TODO: review each `assert` and consider replacing with explicit checks
# since asserts can be stripped with python `-OO` flag
def parse_sexp_to_condition(
sexp: Program,
) -> Tuple[Optional[Err], Optional[ConditionWithArgs]]:
"""
Takes a ChiaLisp sexp and returns a ConditionWithArgs.
If it fails, returns an Error
"""
as_atoms = sexp.as_atom_list()
if len(as_atoms) < 1:
return Err.INVALID_CONDITION, None
opcode = as_atoms[0]
opcode = ConditionOpcode(opcode)
return None, ConditionWithArgs(opcode, as_atoms[1:])
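# Illustrative note (added commentary, not part of the original module): a condition sexp is
# a list whose first atom is the opcode and whose remaining atoms are its arguments. With
# hypothetical `puzzle_hash` and `amount_bytes` values, a CREATE_COIN condition round-trips
# roughly as:
#
#   sexp = Program.to([ConditionOpcode.CREATE_COIN, puzzle_hash, amount_bytes])
#   err, cwa = parse_sexp_to_condition(sexp)
#   assert err is None and cwa.opcode == ConditionOpcode.CREATE_COIN
#   assert cwa.vars == [puzzle_hash, amount_bytes]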
def parse_sexp_to_conditions(
sexp: Program,
) -> Tuple[Optional[Err], Optional[List[ConditionWithArgs]]]:
"""
    Takes a ChiaLisp sexp (list) and returns the list of ConditionWithArgs
    If it fails, returns an Error
"""
results: List[ConditionWithArgs] = []
try:
for _ in sexp.as_iter():
error, cvp = parse_sexp_to_condition(_)
if error:
return error, None
results.append(cvp) # type: ignore # noqa
except ConsensusError:
return Err.INVALID_CONDITION, None
return None, results
def conditions_by_opcode(
conditions: List[ConditionWithArgs],
) -> Dict[ConditionOpcode, List[ConditionWithArgs]]:
"""
    Takes a list of ConditionWithArgs (CVP) and returns a dictionary of CVPs keyed by their opcode
"""
d: Dict[ConditionOpcode, List[ConditionWithArgs]] = {}
cvp: ConditionWithArgs
for cvp in conditions:
if cvp.opcode not in d:
d[cvp.opcode] = list()
d[cvp.opcode].append(cvp)
return d
def pkm_pairs(npc_list: List[NPC], additional_data: bytes) -> Tuple[List[bytes48], List[bytes]]:
ret: Tuple[List[bytes48], List[bytes]] = ([], [])
for npc in npc_list:
for opcode, l in npc.conditions:
if opcode == ConditionOpcode.AGG_SIG_UNSAFE:
for cwa in l:
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret[0].append(bytes48(cwa.vars[0]))
ret[1].append(cwa.vars[1])
elif opcode == ConditionOpcode.AGG_SIG_ME:
for cwa in l:
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret[0].append(bytes48(cwa.vars[0]))
ret[1].append(cwa.vars[1] + npc.coin_name + additional_data)
return ret
def pkm_pairs_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]], coin_name: bytes32, additional_data: bytes
) -> List[Tuple[bytes48, bytes]]:
assert coin_name is not None
ret: List[Tuple[bytes48, bytes]] = []
for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_UNSAFE, []):
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret.append((bytes48(cwa.vars[0]), cwa.vars[1]))
for cwa in conditions_dict.get(ConditionOpcode.AGG_SIG_ME, []):
assert len(cwa.vars) == 2
assert len(cwa.vars[0]) == 48 and len(cwa.vars[1]) <= 1024
assert cwa.vars[0] is not None and cwa.vars[1] is not None
ret.append((bytes48(cwa.vars[0]), cwa.vars[1] + coin_name + additional_data))
return ret
def created_outputs_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
input_coin_name: bytes32,
) -> List[Coin]:
output_coins = []
for cvp in conditions_dict.get(ConditionOpcode.CREATE_COIN, []):
puzzle_hash, amount_bin = cvp.vars[0], cvp.vars[1]
amount = int_from_bytes(amount_bin)
coin = Coin(input_coin_name, puzzle_hash, uint64(amount))
output_coins.append(coin)
return output_coins
def coin_announcements_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
input_coin: Coin,
) -> Set[Announcement]:
output_announcements: Set[Announcement] = set()
for cvp in conditions_dict.get(ConditionOpcode.CREATE_COIN_ANNOUNCEMENT, []):
message = cvp.vars[0]
assert len(message) <= 1024
announcement = Announcement(input_coin.name(), message)
output_announcements.add(announcement)
return output_announcements
def puzzle_announcements_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
input_coin: Coin,
) -> Set[Announcement]:
output_announcements: Set[Announcement] = set()
for cvp in conditions_dict.get(ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, []):
message = cvp.vars[0]
assert len(message) <= 1024
announcement = Announcement(input_coin.puzzle_hash, message)
output_announcements.add(announcement)
return output_announcements
def coin_announcement_names_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
input_coin: Coin,
) -> List[bytes32]:
output = [an.name() for an in coin_announcements_for_conditions_dict(conditions_dict, input_coin)]
return output
def puzzle_announcement_names_for_conditions_dict(
conditions_dict: Dict[ConditionOpcode, List[ConditionWithArgs]],
input_coin: Coin,
) -> List[bytes32]:
output = [an.name() for an in puzzle_announcements_for_conditions_dict(conditions_dict, input_coin)]
return output
def conditions_dict_for_solution(
puzzle_reveal: SerializedProgram,
solution: SerializedProgram,
max_cost: int,
) -> Tuple[Optional[Err], Optional[Dict[ConditionOpcode, List[ConditionWithArgs]]], uint64]:
error, result, cost = conditions_for_solution(puzzle_reveal, solution, max_cost)
if error or result is None:
return error, None, uint64(0)
return None, conditions_by_opcode(result), cost
def conditions_for_solution(
puzzle_reveal: SerializedProgram,
solution: SerializedProgram,
max_cost: int,
) -> Tuple[Optional[Err], Optional[List[ConditionWithArgs]], uint64]:
# get the standard script for a puzzle hash and feed in the solution
try:
cost, r = puzzle_reveal.run_with_cost(max_cost, solution)
error, result = parse_sexp_to_conditions(r)
return error, result, uint64(cost)
except Program.EvalError:
return Err.SEXP_ERROR, None, uint64(0)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/condition_tools.py
| 0.683525 | 0.582729 |
condition_tools.py
|
pypi
|
import colorama
import os
import pkg_resources
import sys
import unicodedata
from bitstring import BitArray # pyright: reportMissingImports=false
from blspy import AugSchemeMPL, G1Element, PrivateKey # pyright: reportMissingImports=false
from salvia.util.hash import std_hash
from salvia.util.keyring_wrapper import KeyringWrapper
from hashlib import pbkdf2_hmac
from pathlib import Path
from secrets import token_bytes
from time import sleep
from typing import Any, Dict, List, Optional, Tuple
CURRENT_KEY_VERSION = "1.8"
DEFAULT_USER = f"user-salvia-{CURRENT_KEY_VERSION}" # e.g. user-salvia-1.8
DEFAULT_SERVICE = f"salvia-{DEFAULT_USER}" # e.g. salvia-user-salvia-1.8
DEFAULT_PASSPHRASE_PROMPT = (
colorama.Fore.YELLOW + colorama.Style.BRIGHT + "(Unlock Keyring)" + colorama.Style.RESET_ALL + " Passphrase: "
) # noqa: E501
FAILED_ATTEMPT_DELAY = 0.5
MAX_KEYS = 100
MAX_RETRIES = 3
MIN_PASSPHRASE_LEN = 8
class KeyringIsLocked(Exception):
pass
class KeyringRequiresMigration(Exception):
pass
class KeyringCurrentPassphraseIsInvalid(Exception):
pass
class KeyringMaxUnlockAttempts(Exception):
pass
def supports_keyring_passphrase() -> bool:
# Support can be disabled by setting CHIA_PASSPHRASE_SUPPORT to 0/false
return os.environ.get("CHIA_PASSPHRASE_SUPPORT", "true").lower() in ["1", "true"]
def supports_os_passphrase_storage() -> bool:
return sys.platform in ["darwin", "win32", "cygwin"]
def passphrase_requirements() -> Dict[str, Any]:
"""
Returns a dictionary specifying current passphrase requirements
"""
    if not supports_keyring_passphrase():
return {}
return {"is_optional": True, "min_length": MIN_PASSPHRASE_LEN} # lgtm [py/clear-text-logging-sensitive-data]
def set_keys_root_path(keys_root_path: Path) -> None:
"""
Used to set the keys_root_path prior to instantiating the KeyringWrapper shared instance.
"""
KeyringWrapper.set_keys_root_path(keys_root_path)
def obtain_current_passphrase(prompt: str = DEFAULT_PASSPHRASE_PROMPT, use_passphrase_cache: bool = False) -> str:
"""
Obtains the master passphrase for the keyring, optionally using the cached
value (if previously set). If the passphrase isn't already cached, the user is
prompted interactively to enter their passphrase a max of MAX_RETRIES times
before failing.
"""
from salvia.cmds.passphrase_funcs import prompt_for_passphrase
if use_passphrase_cache:
passphrase, validated = KeyringWrapper.get_shared_instance().get_cached_master_passphrase()
if passphrase:
# If the cached passphrase was previously validated, we assume it's... valid
if validated:
return passphrase
# Cached passphrase needs to be validated
if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase):
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True)
return passphrase
else:
# Cached passphrase is bad, clear the cache
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(None)
# Prompt interactively with up to MAX_RETRIES attempts
for i in range(MAX_RETRIES):
colorama.init()
passphrase = prompt_for_passphrase(prompt)
if KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase):
# If using the passphrase cache, and the user inputted a passphrase, update the cache
if use_passphrase_cache:
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase, validated=True)
return passphrase
sleep(FAILED_ATTEMPT_DELAY)
print("Incorrect passphrase\n")
raise KeyringMaxUnlockAttempts("maximum passphrase attempts reached")
def unlocks_keyring(use_passphrase_cache=False):
"""
Decorator used to unlock the keyring interactively, if necessary
"""
def inner(func):
def wrapper(*args, **kwargs):
try:
if KeyringWrapper.get_shared_instance().has_master_passphrase():
obtain_current_passphrase(use_passphrase_cache=use_passphrase_cache)
except Exception as e:
print(f"Unable to unlock the keyring: {e}")
sys.exit(1)
return func(*args, **kwargs)
return wrapper
return inner
def bip39_word_list() -> str:
return pkg_resources.resource_string(__name__, "english.txt").decode()
def generate_mnemonic() -> str:
mnemonic_bytes = token_bytes(32)
mnemonic = bytes_to_mnemonic(mnemonic_bytes)
return mnemonic
def bytes_to_mnemonic(mnemonic_bytes: bytes) -> str:
if len(mnemonic_bytes) not in [16, 20, 24, 28, 32]:
raise ValueError(
f"Data length should be one of the following: [16, 20, 24, 28, 32], but it is {len(mnemonic_bytes)}."
)
word_list = bip39_word_list().splitlines()
CS = len(mnemonic_bytes) // 4
checksum = BitArray(bytes(std_hash(mnemonic_bytes)))[:CS]
bitarray = BitArray(mnemonic_bytes) + checksum
mnemonics = []
assert len(bitarray) % 11 == 0
for i in range(0, len(bitarray) // 11):
start = i * 11
end = start + 11
bits = bitarray[start:end]
m_word_position = bits.uint
m_word = word_list[m_word_position]
mnemonics.append(m_word)
return " ".join(mnemonics)
def bytes_from_mnemonic(mnemonic_str: str) -> bytes:
mnemonic: List[str] = mnemonic_str.split(" ")
if len(mnemonic) not in [12, 15, 18, 21, 24]:
raise ValueError("Invalid mnemonic length")
word_list = {word: i for i, word in enumerate(bip39_word_list().splitlines())}
bit_array = BitArray()
for i in range(0, len(mnemonic)):
word = mnemonic[i]
if word not in word_list:
raise ValueError(f"'{word}' is not in the mnemonic dictionary; may be misspelled")
value = word_list[word]
bit_array.append(BitArray(uint=value, length=11))
CS: int = len(mnemonic) // 3
ENT: int = len(mnemonic) * 11 - CS
assert len(bit_array) == len(mnemonic) * 11
assert ENT % 32 == 0
entropy_bytes = bit_array[:ENT].bytes
checksum_bytes = bit_array[ENT:]
checksum = BitArray(std_hash(entropy_bytes))[:CS]
assert len(checksum_bytes) == CS
if checksum != checksum_bytes:
raise ValueError("Invalid order of mnemonic words")
return entropy_bytes
def mnemonic_to_seed(mnemonic: str, passphrase: str) -> bytes:
"""
    Uses the BIP39 standard to derive a 64-byte seed from the mnemonic and passphrase.
"""
salt_str: str = "mnemonic" + passphrase
salt = unicodedata.normalize("NFKD", salt_str).encode("utf-8")
mnemonic_normalized = unicodedata.normalize("NFKD", mnemonic).encode("utf-8")
seed = pbkdf2_hmac("sha512", mnemonic_normalized, salt, 2048)
assert len(seed) == 64
return seed
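As a quick sanity check of the three helpers above, a round trip from raw entropy to a mnemonic and back, and then to a BIP39 seed (the entropy value is arbitrary):

# Hedged sketch: 32 bytes of entropy -> 24-word mnemonic -> entropy and 64-byte seed.
entropy = bytes(range(32))                       # arbitrary 32-byte entropy
mnemonic = bytes_to_mnemonic(entropy)
assert len(mnemonic.split(" ")) == 24            # 32 bytes of entropy yields 24 words
assert bytes_from_mnemonic(mnemonic) == entropy  # checksum verifies and entropy round-trips
seed = mnemonic_to_seed(mnemonic, passphrase="")
assert len(seed) == 64                           # PBKDF2-HMAC-SHA512 output length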
def default_keychain_user() -> str:
return DEFAULT_USER
def default_keychain_service() -> str:
return DEFAULT_SERVICE
def get_private_key_user(user: str, index: int) -> str:
"""
Returns the keychain user string for a key index.
"""
return f"wallet-{user}-{index}"
class Keychain:
"""
The keychain stores two types of keys: private keys, which are PrivateKeys from blspy,
and private key seeds, which are bytes objects that are used as a seed to construct
PrivateKeys. Private key seeds are converted to mnemonics when shown to users.
Both types of keys are stored as hex strings in the python keyring, and the implementation of
the keyring depends on OS. Both types of keys can be added, and get_private_keys returns a
list of all keys.
"""
def __init__(self, user: Optional[str] = None, service: Optional[str] = None):
self.user = user if user is not None else default_keychain_user()
self.service = service if service is not None else default_keychain_service()
self.keyring_wrapper = KeyringWrapper.get_shared_instance()
@unlocks_keyring(use_passphrase_cache=True)
def _get_pk_and_entropy(self, user: str) -> Optional[Tuple[G1Element, bytes]]:
"""
Returns the keychain contents for a specific 'user' (key index). The contents
        include a G1Element and the entropy required to generate the private key.
Note that generating the actual private key also requires the passphrase.
"""
read_str = self.keyring_wrapper.get_passphrase(self.service, user)
if read_str is None or len(read_str) == 0:
return None
str_bytes = bytes.fromhex(read_str)
return (
G1Element.from_bytes(str_bytes[: G1Element.SIZE]),
str_bytes[G1Element.SIZE :], # flake8: noqa
)
def _get_free_private_key_index(self) -> int:
"""
Get the index of the first free spot in the keychain.
"""
index = 0
while True:
pk = get_private_key_user(self.user, index)
pkent = self._get_pk_and_entropy(pk)
if pkent is None:
return index
index += 1
@unlocks_keyring(use_passphrase_cache=True)
def add_private_key(self, mnemonic: str, passphrase: str) -> PrivateKey:
"""
Adds a private key to the keychain, with the given entropy and passphrase. The
keychain itself will store the public key, and the entropy bytes,
but not the passphrase.
"""
seed = mnemonic_to_seed(mnemonic, passphrase)
entropy = bytes_from_mnemonic(mnemonic)
index = self._get_free_private_key_index()
key = AugSchemeMPL.key_gen(seed)
fingerprint = key.get_g1().get_fingerprint()
if fingerprint in [pk.get_fingerprint() for pk in self.get_all_public_keys()]:
# Prevents duplicate add
return key
self.keyring_wrapper.set_passphrase(
self.service,
get_private_key_user(self.user, index),
bytes(key.get_g1()).hex() + entropy.hex(),
)
return key
def get_first_private_key(self, passphrases: List[str] = [""]) -> Optional[Tuple[PrivateKey, bytes]]:
"""
Returns the first key in the keychain that has one of the passed in passphrases.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if key.get_g1() == pk:
return (key, ent)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def get_private_key_by_fingerprint(
self, fingerprint: int, passphrases: List[str] = [""]
) -> Optional[Tuple[PrivateKey, bytes]]:
"""
        Returns the first private key which has the given public key fingerprint.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if pk.get_fingerprint() == fingerprint:
return (key, ent)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def get_all_private_keys(self, passphrases: List[str] = [""]) -> List[Tuple[PrivateKey, bytes]]:
"""
Returns all private keys which can be retrieved, with the given passphrases.
A tuple of key, and entropy bytes (i.e. mnemonic) is returned for each key.
"""
all_keys: List[Tuple[PrivateKey, bytes]] = []
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
for pp in passphrases:
mnemonic = bytes_to_mnemonic(ent)
seed = mnemonic_to_seed(mnemonic, pp)
key = AugSchemeMPL.key_gen(seed)
if key.get_g1() == pk:
all_keys.append((key, ent))
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return all_keys
def get_all_public_keys(self) -> List[G1Element]:
"""
Returns all public keys.
"""
all_keys: List[Tuple[G1Element, bytes]] = []
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
all_keys.append(pk)
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return all_keys
def get_first_public_key(self) -> Optional[G1Element]:
"""
Returns the first public key.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
return pk
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
return None
def delete_key_by_fingerprint(self, fingerprint: int):
"""
Deletes all keys which have the given public key fingerprint.
"""
index = 0
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
while index <= MAX_KEYS:
if pkent is not None:
pk, ent = pkent
if pk.get_fingerprint() == fingerprint:
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
index += 1
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
def delete_all_keys(self):
"""
Deletes all keys from the keychain.
"""
index = 0
delete_exception = False
pkent = None
while True:
try:
pkent = self._get_pk_and_entropy(get_private_key_user(self.user, index))
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
except Exception:
# Some platforms might throw on no existing key
delete_exception = True
# Stop when there are no more keys to delete
if (pkent is None or delete_exception) and index > MAX_KEYS:
break
index += 1
index = 0
        delete_exception = False
pkent = None
while True:
try:
pkent = self._get_pk_and_entropy(
get_private_key_user(self.user, index)
) # changed from _get_fingerprint_and_entropy to _get_pk_and_entropy - GH
self.keyring_wrapper.delete_passphrase(self.service, get_private_key_user(self.user, index))
except Exception:
# Some platforms might throw on no existing key
delete_exception = True
# Stop when there are no more keys to delete
if (pkent is None or delete_exception) and index > MAX_KEYS:
break
index += 1
@staticmethod
def is_keyring_locked() -> bool:
"""
Returns whether the keyring is in a locked state. If the keyring doesn't have a master passphrase set,
or if a master passphrase is set and the cached passphrase is valid, the keyring is "unlocked"
"""
# Unlocked: If a master passphrase isn't set, or if the cached passphrase is valid
if not Keychain.has_master_passphrase() or (
Keychain.has_cached_passphrase()
and Keychain.master_passphrase_is_valid(Keychain.get_cached_master_passphrase())
):
return False
# Locked: Everything else
return True
@staticmethod
def needs_migration() -> bool:
"""
Returns a bool indicating whether the underlying keyring needs to be migrated to the new
format for passphrase support.
"""
return KeyringWrapper.get_shared_instance().using_legacy_keyring()
@staticmethod
def handle_migration_completed():
"""
When migration completes outside of the current process, we rely on a notification to inform
the current process that it needs to reset/refresh its keyring. This allows us to stop using
the legacy keyring in an already-running daemon if migration is completed using the CLI.
"""
KeyringWrapper.get_shared_instance().refresh_keyrings()
@staticmethod
def migrate_legacy_keyring(
passphrase: Optional[str] = None,
passphrase_hint: Optional[str] = None,
save_passphrase: bool = False,
cleanup_legacy_keyring: bool = False,
) -> None:
"""
Begins legacy keyring migration in a non-interactive manner
"""
if passphrase is not None and passphrase != "":
KeyringWrapper.get_shared_instance().set_master_passphrase(
current_passphrase=None,
new_passphrase=passphrase,
write_to_keyring=False,
allow_migration=False,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
)
KeyringWrapper.get_shared_instance().migrate_legacy_keyring(cleanup_legacy_keyring=cleanup_legacy_keyring)
@staticmethod
def passphrase_is_optional() -> bool:
"""
Returns whether a user-supplied passphrase is optional, as specified by the passphrase requirements.
"""
return passphrase_requirements().get("is_optional", False)
@staticmethod
def minimum_passphrase_length() -> int:
"""
Returns the minimum passphrase length, as specified by the passphrase requirements.
"""
return passphrase_requirements().get("min_length", 0)
@staticmethod
def passphrase_meets_requirements(passphrase: Optional[str]) -> bool:
"""
Returns whether the provided passphrase satisfies the passphrase requirements.
"""
# Passphrase is not required and None was provided
if (passphrase is None or passphrase == "") and Keychain.passphrase_is_optional():
return True
# Passphrase meets the minimum length requirement
if passphrase is not None and len(passphrase) >= Keychain.minimum_passphrase_length():
return True
return False
@staticmethod
def has_master_passphrase() -> bool:
"""
Returns a bool indicating whether the underlying keyring data
is secured by a passphrase.
"""
return KeyringWrapper.get_shared_instance().has_master_passphrase()
@staticmethod
def master_passphrase_is_valid(passphrase: str, force_reload: bool = False) -> bool:
"""
Checks whether the provided passphrase can unlock the keyring. If force_reload
is true, the keyring payload will be re-read from the backing file. If false,
the passphrase will be checked against the in-memory payload.
"""
return KeyringWrapper.get_shared_instance().master_passphrase_is_valid(passphrase, force_reload=force_reload)
@staticmethod
def has_cached_passphrase() -> bool:
"""
Returns whether the master passphrase has been cached (it may need to be validated)
"""
return KeyringWrapper.get_shared_instance().has_cached_master_passphrase()
@staticmethod
def get_cached_master_passphrase() -> str:
"""
Returns the cached master passphrase
"""
passphrase, _ = KeyringWrapper.get_shared_instance().get_cached_master_passphrase()
return passphrase
@staticmethod
def set_cached_master_passphrase(passphrase: Optional[str]) -> None:
"""
Caches the provided master passphrase
"""
KeyringWrapper.get_shared_instance().set_cached_master_passphrase(passphrase)
@staticmethod
def set_master_passphrase(
current_passphrase: Optional[str],
new_passphrase: str,
*,
allow_migration: bool = True,
passphrase_hint: Optional[str] = None,
save_passphrase: bool = False,
) -> None:
"""
Encrypts the keyring contents to new passphrase, provided that the current
passphrase can decrypt the contents
"""
KeyringWrapper.get_shared_instance().set_master_passphrase(
current_passphrase,
new_passphrase,
allow_migration=allow_migration,
passphrase_hint=passphrase_hint,
save_passphrase=save_passphrase,
)
@staticmethod
def remove_master_passphrase(current_passphrase: Optional[str]) -> None:
"""
Removes the user-provided master passphrase, and replaces it with the default
master passphrase. The keyring contents will remain encrypted, but to the
default passphrase.
"""
KeyringWrapper.get_shared_instance().remove_master_passphrase(current_passphrase)
@staticmethod
def get_master_passphrase_hint() -> Optional[str]:
"""
Returns the passphrase hint from the keyring
"""
return KeyringWrapper.get_shared_instance().get_master_passphrase_hint()
@staticmethod
def set_master_passphrase_hint(current_passphrase: str, passphrase_hint: Optional[str]) -> None:
"""
Convenience method for setting/removing the passphrase hint. Requires the current
passphrase, as the passphrase hint is written as part of a passphrase update.
"""
Keychain.set_master_passphrase(current_passphrase, current_passphrase, passphrase_hint=passphrase_hint)
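To make the class's flow concrete, a minimal sketch of adding and then removing a key. The user/service names are illustrative so the example stays out of the default namespace; on a real machine this still talks to the OS keyring and may prompt for the keyring passphrase.

# Hedged sketch: generate a mnemonic, store the derived key, read it back, delete it.
keychain = Keychain(user="user-salvia-example", service="salvia-example")  # hypothetical namespace
mnemonic = generate_mnemonic()
sk = keychain.add_private_key(mnemonic, passphrase="")
fingerprint = sk.get_g1().get_fingerprint()
assert sk.get_g1() in keychain.get_all_public_keys()
keychain.delete_key_by_fingerprint(fingerprint)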
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/keychain.py
| 0.578686 | 0.205276 |
keychain.py
|
pypi
|
import argparse
import os
import shutil
import sys
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union
import pkg_resources
import yaml
from salvia.util.path import mkdir
def initial_config_file(filename: Union[str, Path]) -> str:
return pkg_resources.resource_string(__name__, f"initial-{filename}").decode()
def create_default_salvia_config(root_path: Path, filenames=["config.yaml"]) -> None:
for filename in filenames:
default_config_file_data: str = initial_config_file(filename)
path: Path = config_path_for_filename(root_path, filename)
tmp_path: Path = path.with_suffix("." + str(os.getpid()))
mkdir(path.parent)
with open(tmp_path, "w") as f:
f.write(default_config_file_data)
try:
os.replace(str(tmp_path), str(path))
except PermissionError:
shutil.move(str(tmp_path), str(path))
def config_path_for_filename(root_path: Path, filename: Union[str, Path]) -> Path:
path_filename = Path(filename)
if path_filename.is_absolute():
return path_filename
return root_path / "config" / filename
def save_config(root_path: Path, filename: Union[str, Path], config_data: Any):
path: Path = config_path_for_filename(root_path, filename)
tmp_path: Path = path.with_suffix("." + str(os.getpid()))
with open(tmp_path, "w") as f:
yaml.safe_dump(config_data, f)
try:
os.replace(str(tmp_path), path)
except PermissionError:
shutil.move(str(tmp_path), str(path))
def load_config(
root_path: Path,
filename: Union[str, Path],
sub_config: Optional[str] = None,
exit_on_error=True,
) -> Dict:
path = config_path_for_filename(root_path, filename)
if not path.is_file():
if not exit_on_error:
raise ValueError("Config not found")
print(f"can't find {path}")
print("** please run `salvia init` to migrate or create new config files **")
# TODO: fix this hack
sys.exit(-1)
r = yaml.safe_load(open(path, "r"))
if sub_config is not None:
r = r.get(sub_config)
return r
def load_config_cli(root_path: Path, filename: str, sub_config: Optional[str] = None) -> Dict:
"""
Loads configuration from the specified filename, in the config directory,
and then overrides any properties using the passed in command line arguments.
Nested properties in the config file can be used in the command line with ".",
for example --farmer_peer.host. Does not support lists.
"""
config = load_config(root_path, filename, sub_config)
flattened_props = flatten_properties(config)
parser = argparse.ArgumentParser()
for prop_name, value in flattened_props.items():
if type(value) is list:
continue
prop_type: Callable = str2bool if type(value) is bool else type(value) # type: ignore
parser.add_argument(f"--{prop_name}", type=prop_type, dest=prop_name)
for key, value in vars(parser.parse_args()).items():
if value is not None:
flattened_props[key] = value
return unflatten_properties(flattened_props)
def flatten_properties(config: Dict) -> Dict:
properties = {}
for key, value in config.items():
if type(value) is dict:
for key_2, value_2 in flatten_properties(value).items():
properties[key + "." + key_2] = value_2
else:
properties[key] = value
return properties
def unflatten_properties(config: Dict) -> Dict:
properties: Dict = {}
for key, value in config.items():
if "." in key:
add_property(properties, key, value)
else:
properties[key] = value
return properties
def add_property(d: Dict, partial_key: str, value: Any):
key_1, key_2 = partial_key.split(".", maxsplit=1)
if key_1 not in d:
d[key_1] = {}
if "." in key_2:
add_property(d[key_1], key_2, value)
else:
d[key_1][key_2] = value
def str2bool(v: Union[str, bool]) -> bool:
# Source from https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
if isinstance(v, bool):
return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
def traverse_dict(d: Dict, key_path: str) -> Any:
"""
Traverse nested dictionaries to find the element pointed-to by key_path.
Key path components are separated by a ':' e.g.
"root:child:a"
"""
if type(d) is not dict:
raise TypeError(f"unable to traverse into non-dict value with key path: {key_path}")
# Extract one path component at a time
components = key_path.split(":", maxsplit=1)
if components is None or len(components) == 0:
raise KeyError(f"invalid config key path: {key_path}")
key = components[0]
remaining_key_path = components[1] if len(components) > 1 else None
val: Any = d.get(key, None)
if val is not None:
if remaining_key_path is not None:
return traverse_dict(val, remaining_key_path)
return val
else:
raise KeyError(f"value not found for key: {key}")
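A small sketch of the helpers above on an in-memory dict: flatten_properties produces the dotted keys that load_config_cli exposes as command-line flags, unflatten_properties reverses it, and traverse_dict uses colon-separated key paths. The config values are made up.

# Hedged sketch with made-up config values.
cfg = {"full_node": {"port": 9678, "logging": {"log_stdout": False}}}

flat = flatten_properties(cfg)
assert flat["full_node.logging.log_stdout"] is False       # dotted key, e.g. --full_node.logging.log_stdout

assert unflatten_properties(flat) == cfg                    # round-trips back to the nested dict
assert traverse_dict(cfg, "full_node:logging:log_stdout") is False  # colon-separated key path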
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/config.py
| 0.541651 | 0.17774 |
config.py
|
pypi
|
import logging
from typing import Dict, List, Optional
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.header_block import HeaderBlock
from salvia.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from salvia.util.ints import uint32
class BlockCache(BlockchainInterface):
def __init__(
self,
blocks: Dict[bytes32, BlockRecord],
headers: Dict[bytes32, HeaderBlock] = None,
height_to_hash: Dict[uint32, bytes32] = None,
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = None,
):
if sub_epoch_summaries is None:
sub_epoch_summaries = {}
if height_to_hash is None:
height_to_hash = {}
if headers is None:
headers = {}
self._block_records = blocks
self._headers = headers
self._height_to_hash = height_to_hash
self._sub_epoch_summaries = sub_epoch_summaries
self._sub_epoch_segments: Dict[uint32, SubEpochSegments] = {}
self.log = logging.getLogger(__name__)
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self._block_records[header_hash]
def height_to_block_record(self, height: uint32, check_db: bool = False) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self._sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self._sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
if height not in self._height_to_hash:
self.log.warning(f"could not find height in cache {height}")
return None
return self._height_to_hash[height]
def contains_block(self, header_hash: bytes32) -> bool:
return header_hash in self._block_records
def contains_height(self, height: uint32) -> bool:
return height in self._height_to_hash
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return self._block_records
async def get_block_records_at(self, heights: List[uint32]) -> List[BlockRecord]:
block_records: List[BlockRecord] = []
for height in heights:
block_records.append(self.height_to_block_record(height))
return block_records
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
return self._block_records[header_hash]
def remove_block_record(self, header_hash: bytes32):
del self._block_records[header_hash]
def add_block_record(self, block: BlockRecord):
self._block_records[block.header_hash] = block
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
return self._headers
async def persist_sub_epoch_challenge_segments(
self, sub_epoch_summary_height: uint32, segments: List[SubEpochChallengeSegment]
):
self._sub_epoch_segments[sub_epoch_summary_height] = SubEpochSegments(segments)
async def get_sub_epoch_challenge_segments(
self,
sub_epoch_summary_height: uint32,
) -> Optional[List[SubEpochChallengeSegment]]:
segments = self._sub_epoch_segments.get(sub_epoch_summary_height)
if segments is None:
return None
return segments.challenge_segments
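A tiny sketch of the cache's lookup behavior with no data loaded (constructing real BlockRecord objects is out of scope here, so the dicts stay empty):

# Hedged sketch: an empty cache answers membership queries without raising.
cache = BlockCache(blocks={})
assert not cache.contains_height(uint32(0))
assert cache.height_to_hash(uint32(0)) is None   # logs a warning and returns None
assert cache.get_ses_heights() == []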
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/block_cache.py
| 0.877903 | 0.263582 |
block_cache.py
|
pypi
|
def format_bytes(bytes: int) -> str:
if not isinstance(bytes, int) or bytes < 0:
return "Invalid"
LABELS = ("MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB")
BASE = 1024
value = bytes / BASE
for label in LABELS:
value /= BASE
if value < BASE:
return f"{value:.3f} {label}"
return f"{value:.3f} {LABELS[-1]}"
def format_minutes(minutes: int) -> str:
if not isinstance(minutes, int):
return "Invalid"
if minutes == 0:
return "Now"
hour_minutes = 60
day_minutes = 24 * hour_minutes
week_minutes = 7 * day_minutes
months_minutes = 43800
year_minutes = 12 * months_minutes
years = int(minutes / year_minutes)
months = int(minutes / months_minutes)
weeks = int(minutes / week_minutes)
days = int(minutes / day_minutes)
hours = int(minutes / hour_minutes)
def format_unit_string(str_unit: str, count: int) -> str:
return f"{count} {str_unit}{('s' if count > 1 else '')}"
def format_unit(unit: str, count: int, unit_minutes: int, next_unit: str, next_unit_minutes: int) -> str:
formatted = format_unit_string(unit, count)
minutes_left = minutes % unit_minutes
if minutes_left >= next_unit_minutes:
formatted += " and " + format_unit_string(next_unit, int(minutes_left / next_unit_minutes))
return formatted
if years > 0:
return format_unit("year", years, year_minutes, "month", months_minutes)
if months > 0:
return format_unit("month", months, months_minutes, "week", week_minutes)
if weeks > 0:
return format_unit("week", weeks, week_minutes, "day", day_minutes)
if days > 0:
return format_unit("day", days, day_minutes, "hour", hour_minutes)
if hours > 0:
return format_unit("hour", hours, hour_minutes, "minute", 1)
if minutes > 0:
return format_unit_string("minute", minutes)
return "Unknown"
def prompt_yes_no(prompt: str = "(y/n) ") -> bool:
while True:
response = str(input(prompt)).lower().strip()
ch = response[:1]
if ch == "y":
return True
elif ch == "n":
return False
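For reference, a few expected outputs from format_bytes and format_minutes above (note that format_bytes reports everything in MiB and larger units):

# Hedged sketch of expected formatter outputs.
assert format_bytes(1024 * 1024) == "1.000 MiB"
assert format_bytes(3 * 1024 ** 4) == "3.000 TiB"
assert format_minutes(0) == "Now"
assert format_minutes(90) == "1 hour and 30 minutes"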
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/misc.py
| 0.668664 | 0.534066 |
misc.py
|
pypi
|
import dataclasses
import sys
from typing import Any, List, Optional, Tuple, Type, Union
if sys.version_info < (3, 8):
def get_args(t: Type[Any]) -> Tuple[Any, ...]:
return getattr(t, "__args__", ())
def get_origin(t: Type[Any]) -> Optional[Type[Any]]:
return getattr(t, "__origin__", None)
else:
from typing import get_args, get_origin
def is_type_List(f_type: Type) -> bool:
return (get_origin(f_type) is not None and get_origin(f_type) == list) or f_type == list
def is_type_SpecificOptional(f_type) -> bool:
"""
    Returns True for types such as Optional[T], but not for bare Optional or T.
"""
return get_origin(f_type) is not None and f_type.__origin__ == Union and get_args(f_type)[1]() is None
def is_type_Tuple(f_type: Type) -> bool:
return (get_origin(f_type) is not None and get_origin(f_type) == tuple) or f_type == tuple
def strictdataclass(cls: Any):
class _Local:
"""
Dataclass where all fields must be type annotated, and type checking is performed
at initialization, even recursively through Lists. Non-annotated fields are ignored.
Also, for any fields which have a type with .from_bytes(bytes) or constructor(bytes),
bytes can be passed in and the type can be constructed.
"""
def parse_item(self, item: Any, f_name: str, f_type: Type) -> Any:
if is_type_List(f_type):
collected_list: List = []
inner_type: Type = get_args(f_type)[0]
# wjb assert inner_type != get_args(List)[0] # type: ignore
if not is_type_List(type(item)):
raise ValueError(f"Wrong type for {f_name}, need a list.")
for el in item:
collected_list.append(self.parse_item(el, f_name, inner_type))
return collected_list
if is_type_SpecificOptional(f_type):
if item is None:
return None
else:
inner_type: Type = get_args(f_type)[0] # type: ignore
return self.parse_item(item, f_name, inner_type)
if is_type_Tuple(f_type):
collected_list = []
if not is_type_Tuple(type(item)) and not is_type_List(type(item)):
raise ValueError(f"Wrong type for {f_name}, need a tuple.")
if len(item) != len(get_args(f_type)):
raise ValueError(f"Wrong number of elements in tuple {f_name}.")
for i in range(len(item)):
inner_type = get_args(f_type)[i]
tuple_item = item[i]
collected_list.append(self.parse_item(tuple_item, f_name, inner_type))
return tuple(collected_list)
if not isinstance(item, f_type):
try:
item = f_type(item)
except (TypeError, AttributeError, ValueError):
try:
item = f_type.from_bytes(item)
except Exception:
item = f_type.from_bytes(bytes(item))
if not isinstance(item, f_type):
raise ValueError(f"Wrong type for {f_name}")
return item
def __post_init__(self):
try:
fields = self.__annotations__ # pylint: disable=no-member
except Exception:
fields = {}
data = self.__dict__
for (f_name, f_type) in fields.items():
if f_name not in data:
raise ValueError(f"Field {f_name} not present")
try:
if not isinstance(data[f_name], f_type):
object.__setattr__(self, f_name, self.parse_item(data[f_name], f_name, f_type))
except TypeError:
# Throws a TypeError because we cannot call isinstance for subscripted generics like Optional[int]
object.__setattr__(self, f_name, self.parse_item(data[f_name], f_name, f_type))
class NoTypeChecking:
__no_type_check__ = True
cls1 = dataclasses.dataclass(cls, init=False, frozen=True) # type: ignore
if dataclasses.fields(cls1) == ():
return type(cls.__name__, (cls1, _Local, NoTypeChecking), {})
return type(cls.__name__, (cls1, _Local), {})
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/type_checking.py
| 0.583441 | 0.269742 |
type_checking.py
|
pypi
|
from abc import ABCMeta, abstractmethod
from hashlib import sha256
from typing import Any, Dict, List, Tuple
from salvia.types.blockchain_format.sized_bytes import bytes32
"""
A simple, confidence-inspiring Merkle Set standard
Advantages of this standard:
Low CPU requirements
Small proofs of inclusion/exclusion
Reasonably simple implementation
The main tricks in this standard are:
Skips repeated hashing of exactly two things even when they share prefix bits
Proofs support proving inclusion/exclusion for a large number of values in
a single string. They're a serialization of a subset of the tree.
Proof format:
multiproof: subtree
subtree: middle or terminal or truncated or empty
middle: MIDDLE 1 subtree subtree
terminal: TERMINAL 1 hash 32
# If the sibling is empty truncated implies more than two children.
truncated: TRUNCATED 1 hash 32
empty: EMPTY 1
EMPTY: \x00
TERMINAL: \x01
MIDDLE: \x02
TRUNCATED: \x03
"""
EMPTY = bytes([0])
TERMINAL = bytes([1])
MIDDLE = bytes([2])
TRUNCATED = bytes([3])
BLANK = bytes([0] * 32)
prehashed: Dict[bytes, Any] = {}
def init_prehashed():
for x in [EMPTY, TERMINAL, MIDDLE]:
for y in [EMPTY, TERMINAL, MIDDLE]:
prehashed[x + y] = sha256(bytes([0] * 30) + x + y)
init_prehashed()
def hashdown(mystr: bytes) -> bytes:
assert len(mystr) == 66
h = prehashed[bytes(mystr[0:1] + mystr[33:34])].copy()
h.update(mystr[1:33] + mystr[34:])
return h.digest()[:32]
def compress_root(mystr: bytes) -> bytes:
assert len(mystr) == 33
if mystr[0:1] == MIDDLE:
return mystr[1:]
if mystr[0:1] == EMPTY:
assert mystr[1:] == BLANK
return BLANK
return sha256(mystr).digest()[:32]
def get_bit(mybytes: bytes, pos: int) -> int:
assert len(mybytes) == 32
return (mybytes[pos // 8] >> (7 - (pos % 8))) & 1
class Node(metaclass=ABCMeta):
hash: bytes
@abstractmethod
def get_hash(self) -> bytes:
pass
@abstractmethod
def is_empty(self) -> bool:
pass
@abstractmethod
def is_terminal(self) -> bool:
pass
@abstractmethod
def is_double(self) -> bool:
pass
@abstractmethod
def add(self, toadd: bytes, depth: int) -> "Node":
pass
@abstractmethod
def remove(self, toremove: bytes, depth: int):
pass
@abstractmethod
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
pass
@abstractmethod
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
pass
@abstractmethod
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
class MerkleSet:
root: Node
def __init__(self, root: Node = None):
if root is None:
self.root = _empty
else:
self.root = root
def get_root(self) -> bytes:
return compress_root(self.root.get_hash())
def add_already_hashed(self, toadd: bytes):
self.root = self.root.add(toadd, 0)
def remove_already_hashed(self, toremove: bytes):
self.root = self.root.remove(toremove, 0)
def is_included_already_hashed(self, tocheck: bytes) -> Tuple[bool, bytes]:
proof: List = []
r = self.root.is_included(tocheck, 0, proof)
return r, b"".join(proof)
def _audit(self, hashes: List[bytes]):
newhashes: List = []
self.root._audit(newhashes, [])
assert newhashes == sorted(newhashes)
class EmptyNode(Node):
def __init__(self):
self.hash = BLANK
def get_hash(self) -> bytes:
return EMPTY + BLANK
def is_empty(self) -> bool:
return True
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
raise SetError()
def add(self, toadd: bytes, depth: int) -> Node:
return TerminalNode(toadd)
def remove(self, toremove: bytes, depth: int) -> Node:
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(EMPTY)
return False
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(EMPTY)
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
_empty = EmptyNode()
class TerminalNode(Node):
def __init__(self, hash: bytes, bits: List[int] = None):
assert len(hash) == 32
self.hash = hash
if bits is not None:
self._audit([], bits)
def get_hash(self) -> bytes:
return TERMINAL + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return True
def is_double(self) -> bool:
raise SetError()
def add(self, toadd: bytes, depth: int) -> Node:
if toadd == self.hash:
return self
if toadd > self.hash:
return self._make_middle([self, TerminalNode(toadd)], depth)
else:
return self._make_middle([TerminalNode(toadd), self], depth)
def _make_middle(self, children: Any, depth: int) -> Node:
cbits = [get_bit(child.hash, depth) for child in children]
if cbits[0] != cbits[1]:
return MiddleNode(children)
nextvals: List[Node] = [_empty, _empty]
nextvals[cbits[0] ^ 1] = _empty # type: ignore
nextvals[cbits[0]] = self._make_middle(children, depth + 1)
return MiddleNode(nextvals)
def remove(self, toremove: bytes, depth: int) -> Node:
if toremove == self.hash:
return _empty
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(TERMINAL + self.hash)
return tocheck == self.hash
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(TERMINAL + self.hash)
def _audit(self, hashes: List[bytes], bits: List[int]):
hashes.append(self.hash)
for pos, v in enumerate(bits):
assert get_bit(self.hash, pos) == v
class MiddleNode(Node):
def __init__(self, children: List[Node]):
self.children = children
if children[0].is_empty() and children[1].is_double():
self.hash = children[1].hash
elif children[1].is_empty() and children[0].is_double():
self.hash = children[0].hash
else:
if children[0].is_empty() and (children[1].is_empty() or children[1].is_terminal()):
raise SetError()
if children[1].is_empty() and children[0].is_terminal():
raise SetError
if children[0].is_terminal() and children[1].is_terminal() and children[0].hash >= children[1].hash:
raise SetError
self.hash = hashdown(children[0].get_hash() + children[1].get_hash())
def get_hash(self) -> bytes:
return MIDDLE + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
if self.children[0].is_empty():
return self.children[1].is_double()
if self.children[1].is_empty():
return self.children[0].is_double()
return self.children[0].is_terminal() and self.children[1].is_terminal()
def add(self, toadd: bytes, depth: int) -> Node:
bit = get_bit(toadd, depth)
child = self.children[bit]
newchild = child.add(toadd, depth + 1)
if newchild is child:
return self
newvals = [x for x in self.children]
newvals[bit] = newchild
return MiddleNode(newvals)
def remove(self, toremove: bytes, depth: int) -> Node:
bit = get_bit(toremove, depth)
child = self.children[bit]
newchild = child.remove(toremove, depth + 1)
if newchild is child:
return self
otherchild = self.children[bit ^ 1]
if newchild.is_empty() and otherchild.is_terminal():
return otherchild
if newchild.is_terminal() and otherchild.is_empty():
return newchild
newvals = [x for x in self.children]
newvals[bit] = newchild
return MiddleNode(newvals)
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
p.append(MIDDLE)
if get_bit(tocheck, depth) == 0:
r = self.children[0].is_included(tocheck, depth + 1, p)
self.children[1].other_included(tocheck, depth + 1, p, not self.children[0].is_empty())
return r
else:
self.children[0].other_included(tocheck, depth + 1, p, not self.children[1].is_empty())
return self.children[1].is_included(tocheck, depth + 1, p)
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
if collapse or not self.is_double():
p.append(TRUNCATED + self.hash)
else:
self.is_included(tocheck, depth, p)
def _audit(self, hashes: List[bytes], bits: List[int]):
self.children[0]._audit(hashes, bits + [0])
self.children[1]._audit(hashes, bits + [1])
class TruncatedNode(Node):
def __init__(self, hash: bytes):
self.hash = hash
def get_hash(self) -> bytes:
return MIDDLE + self.hash
def is_empty(self) -> bool:
return False
def is_terminal(self) -> bool:
return False
def is_double(self) -> bool:
return False
def add(self, toadd: bytes, depth: int) -> Node:
return self
def remove(self, toremove: bytes, depth: int) -> Node:
return self
def is_included(self, tocheck: bytes, depth: int, p: List[bytes]) -> bool:
raise SetError()
def other_included(self, tocheck: bytes, depth: int, p: List[bytes], collapse: bool):
p.append(TRUNCATED + self.hash)
def _audit(self, hashes: List[bytes], bits: List[int]):
pass
class SetError(Exception):
pass
def confirm_included(root: Node, val: bytes, proof: bytes32) -> bool:
    return confirm_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_included_already_hashed(root: Node, val: bytes, proof: bytes32) -> bool:
return _confirm(root, val, proof, True)
def confirm_not_included(root: Node, val: bytes, proof: bytes32) -> bool:
return confirm_not_included_already_hashed(root, sha256(val).digest(), proof)
def confirm_not_included_already_hashed(root: Node, val: bytes, proof: bytes32) -> bool:
return _confirm(root, val, proof, False)
def _confirm(root: Node, val: bytes, proof: bytes32, expected: bool) -> bool:
try:
p = deserialize_proof(proof)
if p.get_root() != root:
return False
r, junk = p.is_included_already_hashed(val)
return r == expected
except SetError:
return False
def deserialize_proof(proof: bytes32) -> MerkleSet:
try:
r, pos = _deserialize(proof, 0, [])
if pos != len(proof):
raise SetError()
return MerkleSet(r)
except IndexError:
raise SetError()
def _deserialize(proof: bytes32, pos: int, bits: List[int]) -> Tuple[Node, int]:
t = proof[pos : pos + 1] # flake8: noqa
if t == EMPTY:
return _empty, pos + 1
if t == TERMINAL:
return TerminalNode(proof[pos + 1 : pos + 33], bits), pos + 33 # flake8: noqa
if t == TRUNCATED:
return TruncatedNode(proof[pos + 1 : pos + 33]), pos + 33 # flake8: noqa
if t != MIDDLE:
raise SetError()
v0, pos = _deserialize(proof, pos + 1, bits + [0])
v1, pos = _deserialize(proof, pos, bits + [1])
return MiddleNode([v0, v1]), pos
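Tying the pieces together, a minimal sketch of building a set, producing inclusion and exclusion proofs, and verifying them against the root (the values are arbitrary 32-byte hashes):

# Hedged sketch: add two hashed values, then prove and verify inclusion/exclusion.
from hashlib import sha256

a = sha256(b"a").digest()
b = sha256(b"b").digest()
missing = sha256(b"c").digest()

s = MerkleSet()
s.add_already_hashed(a)
s.add_already_hashed(b)
root = s.get_root()

included, proof = s.is_included_already_hashed(a)
assert included and confirm_included_already_hashed(root, a, proof)

included, proof = s.is_included_already_hashed(missing)
assert not included and confirm_not_included_already_hashed(root, missing, proof)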
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/merkle_set.py
| 0.881615 | 0.542015 |
merkle_set.py
|
pypi
|
# Based on this specification from Pieter Wuille:
# https://github.com/sipa/bips/blob/bip-bech32m/bip-bech32m.mediawiki
"""Reference implementation for Bech32m and segwit addresses."""
from typing import List, Optional, Tuple
from salvia.types.blockchain_format.sized_bytes import bytes32
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values: List[int]) -> int:
"""Internal function that computes the Bech32 checksum."""
generator = [0x3B6A57B2, 0x26508E6D, 0x1EA119FA, 0x3D4233DD, 0x2A1462B3]
chk = 1
for value in values:
top = chk >> 25
chk = (chk & 0x1FFFFFF) << 5 ^ value
for i in range(5):
chk ^= generator[i] if ((top >> i) & 1) else 0
return chk
def bech32_hrp_expand(hrp: str) -> List[int]:
"""Expand the HRP into values for checksum computation."""
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
M = 0x2BC830A3
def bech32_verify_checksum(hrp: str, data: List[int]) -> bool:
return bech32_polymod(bech32_hrp_expand(hrp) + data) == M
def bech32_create_checksum(hrp: str, data: List[int]) -> List[int]:
values = bech32_hrp_expand(hrp) + data
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ M
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(hrp: str, data: List[int]) -> str:
"""Compute a Bech32 string given HRP and data values."""
combined = data + bech32_create_checksum(hrp, data)
return hrp + "1" + "".join([CHARSET[d] for d in combined])
def bech32_decode(bech: str) -> Tuple[Optional[str], Optional[List[int]]]:
"""Validate a Bech32 string, and determine HRP and data."""
if (any(ord(x) < 33 or ord(x) > 126 for x in bech)) or (bech.lower() != bech and bech.upper() != bech):
return (None, None)
bech = bech.lower()
pos = bech.rfind("1")
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return (None, None)
if not all(x in CHARSET for x in bech[pos + 1 :]):
return (None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos + 1 :]]
if not bech32_verify_checksum(hrp, data):
return (None, None)
return hrp, data[:-6]
def convertbits(data: List[int], frombits: int, tobits: int, pad: bool = True) -> List[int]:
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = []
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
if value < 0 or (value >> frombits):
raise ValueError("Invalid Value")
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad:
if bits:
ret.append((acc << (tobits - bits)) & maxv)
elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
raise ValueError("Invalid bits")
return ret
def encode_puzzle_hash(puzzle_hash: bytes32, prefix: str) -> str:
encoded = bech32_encode(prefix, convertbits(puzzle_hash, 8, 5))
return encoded
def decode_puzzle_hash(address: str) -> bytes32:
hrpgot, data = bech32_decode(address)
if data is None:
raise ValueError("Invalid Address")
decoded = convertbits(data, 5, 8, False)
decoded_bytes = bytes(decoded)
return decoded_bytes
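A short round-trip sketch of the two helpers above. The "xsl" human-readable prefix is an assumption about salvia's address prefix; any lowercase prefix would round-trip the same way.

# Hedged sketch: 32-byte puzzle hash -> bech32m address -> puzzle hash.
from salvia.types.blockchain_format.sized_bytes import bytes32

puzzle_hash = bytes32(b"\x07" * 32)               # arbitrary puzzle hash
address = encode_puzzle_hash(puzzle_hash, "xsl")  # "xsl" prefix is an assumption
assert address.startswith("xsl1")
assert decode_puzzle_hash(address) == puzzle_hash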
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/bech32m.py
| 0.91708 | 0.648508 |
bech32m.py
|
pypi
|
from pathlib import Path
from typing import List
from blspy import AugSchemeMPL
from salvia.util.ints import uint32
from salvia.util.keychain import Keychain
from salvia.util.validate_alert import create_alert_file, create_not_ready_alert_file, validate_alert_file
bitcoin_hash = None
bram_message = None
status = None
while True:
status_input = input("What is the status of this alert? (ready/not ready)").lower()
if status_input == "ready":
status = True
break
elif status_input == "not ready":
status = False
break
else:
print("Unknown input")
keychain: Keychain = Keychain()
print("\n___________ SELECT KEY ____________")
private_keys = keychain.get_all_private_keys()
if len(private_keys) == 0:
print("There are no saved private keys.")
quit()
print("Showing all private keys:")
for sk, seed in private_keys:
print("\nFingerprint:", sk.get_g1().get_fingerprint())
selected_key = None
while True:
user_input = input("\nEnter fingerprint of the key you want to use, or enter Q to quit: ").lower()
if user_input == "q":
quit()
for sk, seed in private_keys:
fingerprint = sk.get_g1().get_fingerprint()
pub = sk.get_g1()
if int(user_input) == fingerprint:
print(f"Selected: {fingerprint}")
selected_key = sk
break
if selected_key is not None:
break
print("\n___________ HD PATH ____________")
while True:
hd_path = input("Enter the HD path in the form 'm/12381/8444/n/n', or enter Q to quit: ").lower()
if hd_path == "q":
quit()
verify = input(f"Is this correct path: {hd_path}? (y/n) ").lower()
if verify == "y":
break
k = Keychain()
private_keys = k.get_all_private_keys()
path: List[uint32] = [uint32(int(i)) for i in hd_path.split("/") if i != "m"]
# Derive HD key using path form input
for c in path:
selected_key = AugSchemeMPL.derive_child_sk(selected_key, c)
print("Public key:", selected_key.get_g1())
# get file path
file_path = None
while True:
file_path = input("Enter the path where you want to save signed alert file, or q to quit: ")
if file_path == "q" or file_path == "Q":
quit()
file_path = file_path.strip()
y_n = input(f"Is this correct path (y/n)?: {file_path} ").lower()
if y_n == "y":
break
f_path: Path = Path(file_path)
if status is True:
print("")
print("___________ BITCOIN BLOCK HASH ____________")
while True:
bitcoin_hash = input("Insert Bitcoin block hash: ")
print(f"Bitcoin block hash = {bitcoin_hash}")
y_n = input("Does this look good (y/n): ").lower()
if y_n == "y":
break
print("")
print("___________ BRAM MESSAGE ____________")
while True:
bram_message = input("Insert message from Bram: ")
print(f"Bram message = {bram_message}")
y_n = input("Does this look good (y/n): ").lower()
if y_n == "y":
break
genesis_challenge_preimage = f"bitcoin_hash:{bitcoin_hash},bram_message:{bram_message}"
create_alert_file(f_path, selected_key, genesis_challenge_preimage)
print(f"Alert written to file {f_path}")
pubkey = f"{bytes(selected_key.get_g1()).hex()}"
validated = validate_alert_file(f_path, pubkey)
if validated:
print(f"Signature has passed validation for pubkey: {pubkey}")
else:
print(f"Signature has failed validation for pubkey: {pubkey}")
assert False
else:
create_not_ready_alert_file(f_path, selected_key)
print(f"Alert written to file {f_path}")
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/util/create_alert_file.py
| 0.41834 | 0.218544 |
create_alert_file.py
|
pypi
|
import asyncio
import logging
import sys
from pathlib import Path
import click
DEFAULT_STRIPE_SIZE = 65536
log = logging.getLogger(__name__)
def show_plots(root_path: Path):
from salvia.plotting.util import get_plot_directories
print("Directories where plots are being searched for:")
print("Note that subdirectories must be added manually")
print(
"Add with 'salvia plots add -d [dir]' and remove with"
+ " 'salvia plots remove -d [dir]'"
+ " Scan and check plots with 'salvia plots check'"
)
print()
for str_path in get_plot_directories(root_path):
print(f"{str_path}")
@click.group("plots", short_help="Manage your plots")
@click.pass_context
def plots_cmd(ctx: click.Context):
"""Create, add, remove and check your plots"""
from salvia.util.salvia_logging import initialize_logging
root_path: Path = ctx.obj["root_path"]
if not root_path.is_dir():
raise RuntimeError("Please initialize (or migrate) your config directory with 'salvia init'")
initialize_logging("", {"log_stdout": True}, root_path)
@plots_cmd.command("create", short_help="Create plots")
@click.option("-k", "--size", help="Plot size", type=int, default=32, show_default=True)
@click.option("--override-k", help="Force size smaller than 32", default=False, show_default=True, is_flag=True)
@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=1, show_default=True)
@click.option("-b", "--buffer", help="Megabytes for sort/plot buffer", type=int, default=3389, show_default=True)
@click.option("-r", "--num_threads", help="Number of threads to use", type=int, default=2, show_default=True)
@click.option("-u", "--buckets", help="Number of buckets", type=int, default=128, show_default=True)
@click.option(
"-a",
"--alt_fingerprint",
type=int,
default=None,
help="Enter the alternative fingerprint of the key you want to use",
)
@click.option(
"-c",
"--pool_contract_address",
type=str,
default=None,
help="Address of where the pool reward will be sent to. Only used if alt_fingerprint and pool public key are None",
)
@click.option("-f", "--farmer_public_key", help="Hex farmer public key", type=str, default=None)
@click.option("-p", "--pool_public_key", help="Hex public key of pool", type=str, default=None)
@click.option(
"-t",
"--tmp_dir",
help="Temporary directory for plotting files",
type=click.Path(),
default=Path("."),
show_default=True,
)
@click.option("-2", "--tmp2_dir", help="Second temporary directory for plotting files", type=click.Path(), default=None)
@click.option(
"-d",
"--final_dir",
help="Final directory for plots (relative or absolute)",
type=click.Path(),
default=Path("."),
show_default=True,
)
@click.option("-i", "--plotid", help="PlotID in hex for reproducing plots (debugging only)", type=str, default=None)
@click.option("-m", "--memo", help="Memo in hex for reproducing plots (debugging only)", type=str, default=None)
@click.option("-e", "--nobitfield", help="Disable bitfield", default=False, is_flag=True)
@click.option(
"-x", "--exclude_final_dir", help="Skips adding [final dir] to harvester for farming", default=False, is_flag=True
)
@click.option(
"-D",
"--connect_to_daemon",
help="Connects to the daemon for keychain operations",
default=False,
is_flag=True,
hidden=True, # -D is only set when launched by the daemon
)
@click.pass_context
def create_cmd(
ctx: click.Context,
size: int,
override_k: bool,
num: int,
buffer: int,
num_threads: int,
buckets: int,
alt_fingerprint: int,
pool_contract_address: str,
farmer_public_key: str,
pool_public_key: str,
tmp_dir: str,
tmp2_dir: str,
final_dir: str,
plotid: str,
memo: str,
nobitfield: bool,
exclude_final_dir: bool,
connect_to_daemon: bool,
):
from salvia.plotting.create_plots import create_plots, resolve_plot_keys
class Params(object):
def __init__(self):
self.size = size
self.num = num
self.buffer = buffer
self.num_threads = num_threads
self.buckets = buckets
self.stripe_size = DEFAULT_STRIPE_SIZE
self.tmp_dir = Path(tmp_dir)
self.tmp2_dir = Path(tmp2_dir) if tmp2_dir else None
self.final_dir = Path(final_dir)
self.plotid = plotid
self.memo = memo
self.nobitfield = nobitfield
self.exclude_final_dir = exclude_final_dir
if size < 32 and not override_k:
print("k=32 is the minimum size for farming.")
print("If you are testing and you want to use smaller size please add the --override-k flag.")
sys.exit(1)
elif size < 25 and override_k:
print("Error: The minimum k size allowed from the cli is k=25.")
sys.exit(1)
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
farmer_public_key,
alt_fingerprint,
pool_public_key,
pool_contract_address,
ctx.obj["root_path"],
log,
connect_to_daemon,
)
)
asyncio.get_event_loop().run_until_complete(create_plots(Params(), plot_keys, ctx.obj["root_path"]))
@plots_cmd.command("check", short_help="Checks plots")
@click.option("-n", "--num", help="Number of plots or challenges", type=int, default=None)
@click.option(
"-g",
"--grep_string",
help="Shows only plots that contain the string in the filename or directory name",
type=str,
default=None,
)
@click.option("-l", "--list_duplicates", help="List plots with duplicate IDs", default=False, is_flag=True)
@click.option("--debug-show-memo", help="Shows memo to recreate the same exact plot", default=False, is_flag=True)
@click.option("--challenge-start", help="Begins at a different [start] for -n [challenges]", type=int, default=None)
@click.pass_context
def check_cmd(
ctx: click.Context, num: int, grep_string: str, list_duplicates: bool, debug_show_memo: bool, challenge_start: int
):
from salvia.plotting.check_plots import check_plots
check_plots(ctx.obj["root_path"], num, challenge_start, grep_string, list_duplicates, debug_show_memo)
@plots_cmd.command("add", short_help="Adds a directory of plots")
@click.option(
"-d",
"--final_dir",
help="Final directory for plots (relative or absolute)",
type=click.Path(),
default=".",
show_default=True,
)
@click.pass_context
def add_cmd(ctx: click.Context, final_dir: str):
from salvia.plotting.util import add_plot_directory
add_plot_directory(ctx.obj["root_path"], final_dir)
@plots_cmd.command("remove", short_help="Removes a directory of plots from config.yaml")
@click.option(
"-d",
"--final_dir",
help="Final directory for plots (relative or absolute)",
type=click.Path(),
default=".",
show_default=True,
)
@click.pass_context
def remove_cmd(ctx: click.Context, final_dir: str):
from salvia.plotting.util import remove_plot_directory
remove_plot_directory(ctx.obj["root_path"], final_dir)
@plots_cmd.command("show", short_help="Shows the directory of current plots")
@click.pass_context
def show_cmd(ctx: click.Context):
show_plots(ctx.obj["root_path"])
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/plots.py
| 0.422147 | 0.253977 |
plots.py
|
pypi
|
from collections import Counter
from decimal import Decimal
import aiohttp
import asyncio
import functools
import json
import time
from pprint import pprint
from typing import List, Dict, Optional, Callable
from salvia.cmds.units import units
from salvia.cmds.wallet_funcs import print_balance, wallet_coin_unit
from salvia.pools.pool_wallet_info import PoolWalletInfo, PoolSingletonState
from salvia.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from salvia.rpc.farmer_rpc_client import FarmerRpcClient
from salvia.rpc.wallet_rpc_client import WalletRpcClient
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.server.server import ssl_context_for_root
from salvia.ssl.create_ssl import get_mozilla_ca_crt
from salvia.util.bech32m import encode_puzzle_hash
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.config import load_config
from salvia.util.default_root import DEFAULT_ROOT_PATH
from salvia.util.ints import uint16, uint32, uint64
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.wallet_types import WalletType
async def create_pool_args(pool_url: str) -> Dict:
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt())) as response:
if response.ok:
json_dict = json.loads(await response.text())
else:
raise ValueError(f"Response from {pool_url} not OK: {response.status}")
except Exception as e:
raise ValueError(f"Error connecting to pool {pool_url}: {e}")
if json_dict["relative_lock_height"] > 1000:
raise ValueError("Relative lock height too high for this pool, cannot join")
if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION:
raise ValueError(f"Incorrect version: {json_dict['protocol_version']}, should be {POOL_PROTOCOL_VERSION}")
header_msg = f"\n---- Pool parameters fetched from {pool_url} ----"
print(header_msg)
pprint(json_dict)
print("-" * len(header_msg))
return json_dict
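# Illustrative note (not part of the original module): the checks above and in join_pool below
# assume the pool's /pool_info endpoint returns a JSON object with at least these fields.
# The values shown here are hypothetical placeholders, not real pool data.
#
# example_pool_info = {
#     "relative_lock_height": 100,              # must be <= 1000 to be joinable
#     "protocol_version": POOL_PROTOCOL_VERSION,
#     "target_puzzle_hash": "<64 hex chars>",   # puzzle hash the wallet farms to when joined
# }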
async def create(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
state = args["state"]
prompt = not args.get("yes", False)
fee = Decimal(args.get("fee", 0))
fee_seeds = uint64(int(fee * units["salvia"]))
# Could use initial_pool_state_from_dict to simplify
if state == "SELF_POOLING":
pool_url: Optional[str] = None
relative_lock_height = uint32(0)
target_puzzle_hash = None # wallet will fill this in
elif state == "FARMING_TO_POOL":
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
enforce_https = config["full_node"]["selected_network"] == "mainnet"
pool_url = str(args["pool_url"])
if enforce_https and not pool_url.startswith("https://"):
print(f"Pool URLs must be HTTPS on mainnet {pool_url}. Aborting.")
return
json_dict = await create_pool_args(pool_url)
relative_lock_height = json_dict["relative_lock_height"]
target_puzzle_hash = hexstr_to_bytes(json_dict["target_puzzle_hash"])
else:
raise ValueError("Plot NFT must be created in SELF_POOLING or FARMING_TO_POOL state.")
pool_msg = f" and join pool: {pool_url}" if pool_url else ""
print(f"Will create a plot NFT{pool_msg}.")
if prompt:
user_input: str = input("Confirm [n]/y: ")
else:
user_input = "yes"
if user_input.lower() == "y" or user_input.lower() == "yes":
try:
tx_record: TransactionRecord = await wallet_client.create_new_pool_wallet(
target_puzzle_hash,
pool_url,
relative_lock_height,
"localhost:5000",
"new",
state,
fee_seeds,
)
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(str(1), tx_record.name)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do salvia wallet get_transaction -f {fingerprint} -tx 0x{tx_record.name} to get status")
return None
except Exception as e:
print(f"Error creating plot NFT: {e}")
return
print("Aborting.")
async def pprint_pool_wallet_state(
wallet_client: WalletRpcClient,
wallet_id: int,
pool_wallet_info: PoolWalletInfo,
address_prefix: str,
pool_state_dict: Dict,
plot_counts: Counter,
):
if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL and pool_wallet_info.target is None:
expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height
print(f"Current state: INVALID_STATE. Please leave/join again after block height {expected_leave_height}")
else:
print(f"Current state: {PoolSingletonState(pool_wallet_info.current.state).name}")
print(f"Current state from block height: {pool_wallet_info.singleton_block_height}")
print(f"Launcher ID: {pool_wallet_info.launcher_id}")
print(
"Target address (not for plotting): "
f"{encode_puzzle_hash(pool_wallet_info.current.target_puzzle_hash, address_prefix)}"
)
print(f"Number of plots: {plot_counts[pool_wallet_info.p2_singleton_puzzle_hash]}")
print(f"Owner public key: {pool_wallet_info.current.owner_pubkey}")
print(
f"Pool contract address (use ONLY for plotting - do not send money to this address): "
f"{encode_puzzle_hash(pool_wallet_info.p2_singleton_puzzle_hash, address_prefix)}"
)
if pool_wallet_info.target is not None:
print(f"Target state: {PoolSingletonState(pool_wallet_info.target.state).name}")
print(f"Target pool URL: {pool_wallet_info.target.pool_url}")
if pool_wallet_info.current.state == PoolSingletonState.SELF_POOLING.value:
balances: Dict = await wallet_client.get_wallet_balance(str(wallet_id))
balance = balances["confirmed_wallet_balance"]
typ = WalletType(int(WalletType.POOLING_WALLET))
address_prefix, scale = wallet_coin_unit(typ, address_prefix)
print(f"Claimable balance: {print_balance(balance, scale, address_prefix)}")
if pool_wallet_info.current.state == PoolSingletonState.FARMING_TO_POOL:
print(f"Current pool URL: {pool_wallet_info.current.pool_url}")
if pool_wallet_info.launcher_id in pool_state_dict:
pool_state = pool_state_dict[pool_wallet_info.launcher_id]
print(f"Current difficulty: {pool_state_dict[pool_wallet_info.launcher_id]['current_difficulty']}")
print(f"Points balance: {pool_state_dict[pool_wallet_info.launcher_id]['current_points']}")
points_found_24h = [points for timestamp, points in pool_state["points_found_24h"]]
points_acknowledged_24h = [points for timestamp, points in pool_state["points_acknowledged_24h"]]
summed_points_found_24h = sum(points_found_24h)
summed_points_acknowledged_24h = sum(points_acknowledged_24h)
if summed_points_found_24h == 0:
success_pct = 0.0
else:
success_pct = summed_points_acknowledged_24h / summed_points_found_24h
print(f"Points found (24h): {summed_points_found_24h}")
print(f"Percent Successful Points (24h): {success_pct:.2%}")
print(f"Relative lock height: {pool_wallet_info.current.relative_lock_height} blocks")
payout_instructions: str = pool_state_dict[pool_wallet_info.launcher_id]["pool_config"]["payout_instructions"]
try:
payout_address = encode_puzzle_hash(bytes32.fromhex(payout_instructions), address_prefix)
print(f"Payout instructions (pool will pay to this address): {payout_address}")
except Exception:
print(f"Payout instructions (pool will pay you with this): {payout_instructions}")
if pool_wallet_info.current.state == PoolSingletonState.LEAVING_POOL:
expected_leave_height = pool_wallet_info.singleton_block_height + pool_wallet_info.current.relative_lock_height
if pool_wallet_info.target is not None:
print(f"Expected to leave after block height: {expected_leave_height}")
async def show(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
summaries_response = await wallet_client.get_wallets()
wallet_id_passed_in = args.get("id", None)
plot_counts: Counter = Counter()
try:
pool_state_list: List = (await farmer_client.get_pool_state())["pool_state"]
harvesters = await farmer_client.get_harvesters()
for d in harvesters["harvesters"]:
for plot in d["plots"]:
if plot.get("pool_contract_puzzle_hash", None) is not None:
# Non pooled plots will have a None pool_contract_puzzle_hash
plot_counts[hexstr_to_bytes(plot["pool_contract_puzzle_hash"])] += 1
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if farmer is running at {farmer_rpc_port}."
f" You can run the farmer by:\n salvia start farmer-only"
)
else:
print(f"Exception from 'wallet' {e}")
farmer_client.close()
await farmer_client.await_closed()
return
pool_state_dict: Dict[bytes32, Dict] = {
hexstr_to_bytes(pool_state_item["pool_config"]["launcher_id"]): pool_state_item
for pool_state_item in pool_state_list
}
if wallet_id_passed_in is not None:
for summary in summaries_response:
typ = WalletType(int(summary["type"]))
if summary["id"] == wallet_id_passed_in and typ != WalletType.POOLING_WALLET:
print(f"Wallet with id: {wallet_id_passed_in} is not a pooling wallet. Please provide a different id.")
return
pool_wallet_info, _ = await wallet_client.pw_status(wallet_id_passed_in)
await pprint_pool_wallet_state(
wallet_client,
wallet_id_passed_in,
pool_wallet_info,
address_prefix,
pool_state_dict,
plot_counts,
)
else:
print(f"Wallet height: {await wallet_client.get_height_info()}")
print(f"Sync status: {'Synced' if (await wallet_client.get_synced()) else 'Not synced'}")
for summary in summaries_response:
wallet_id = summary["id"]
typ = WalletType(int(summary["type"]))
if typ == WalletType.POOLING_WALLET:
print(f"Wallet id {wallet_id}: ")
pool_wallet_info, _ = await wallet_client.pw_status(wallet_id)
await pprint_pool_wallet_state(
wallet_client,
wallet_id,
pool_wallet_info,
address_prefix,
pool_state_dict,
plot_counts,
)
print("")
farmer_client.close()
await farmer_client.await_closed()
async def get_login_link(launcher_id_str: str) -> None:
launcher_id: bytes32 = hexstr_to_bytes(launcher_id_str)
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
farmer_rpc_port = config["farmer"]["rpc_port"]
farmer_client = await FarmerRpcClient.create(self_hostname, uint16(farmer_rpc_port), DEFAULT_ROOT_PATH, config)
try:
login_link: Optional[str] = await farmer_client.get_pool_login_link(launcher_id)
if login_link is None:
print("Was not able to get login link.")
else:
print(login_link)
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if farmer is running at {farmer_rpc_port}."
f" You can run the farmer by:\n salvia start farmer-only"
)
else:
print(f"Exception from 'farmer' {e}")
finally:
farmer_client.close()
await farmer_client.await_closed()
async def submit_tx_with_confirmation(
message: str, prompt: bool, func: Callable, wallet_client: WalletRpcClient, fingerprint: int, wallet_id: int
):
print(message)
if prompt:
user_input: str = input("Confirm [n]/y: ")
else:
user_input = "yes"
if user_input.lower() == "y" or user_input.lower() == "yes":
try:
tx_record: TransactionRecord = await func()
start = time.time()
while time.time() - start < 10:
await asyncio.sleep(0.1)
tx = await wallet_client.get_transaction(str(1), tx_record.name)
if len(tx.sent_to) > 0:
print(f"Transaction submitted to nodes: {tx.sent_to}")
print(f"Do salvia wallet get_transaction -f {fingerprint} -tx 0x{tx_record.name} to get status")
return None
except Exception as e:
print(f"Error performing operation on Plot NFT -f {fingerprint} wallet id: {wallet_id}: {e}")
return
print("Aborting.")
async def join_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
enforce_https = config["full_node"]["selected_network"] == "mainnet"
pool_url: str = args["pool_url"]
fee = Decimal(args.get("fee", 0))
fee_seeds = uint64(int(fee * units["salvia"]))
if enforce_https and not pool_url.startswith("https://"):
print(f"Pool URLs must be HTTPS on mainnet {pool_url}. Aborting.")
return
wallet_id = args.get("id", None)
prompt = not args.get("yes", False)
try:
async with aiohttp.ClientSession() as session:
async with session.get(f"{pool_url}/pool_info", ssl=ssl_context_for_root(get_mozilla_ca_crt())) as response:
if response.ok:
json_dict = json.loads(await response.text())
else:
print(f"Response not OK: {response.status}")
return
except Exception as e:
print(f"Error connecting to pool {pool_url}: {e}")
return
if json_dict["relative_lock_height"] > 1000:
print("Relative lock height too high for this pool, cannot join")
return
if json_dict["protocol_version"] != POOL_PROTOCOL_VERSION:
print(f"Incorrect version: {json_dict['protocol_version']}, should be {POOL_PROTOCOL_VERSION}")
return
pprint(json_dict)
msg = f"\nWill join pool: {pool_url} with Plot NFT {fingerprint}."
func = functools.partial(
wallet_client.pw_join_pool,
wallet_id,
hexstr_to_bytes(json_dict["target_puzzle_hash"]),
pool_url,
json_dict["relative_lock_height"],
fee_seeds,
)
await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id)
async def self_pool(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
prompt = not args.get("yes", False)
fee = Decimal(args.get("fee", 0))
fee_seeds = uint64(int(fee * units["salvia"]))
msg = f"Will start self-farming with Plot NFT on wallet id {wallet_id} fingerprint {fingerprint}."
func = functools.partial(wallet_client.pw_self_pool, wallet_id, fee_seeds)
await submit_tx_with_confirmation(msg, prompt, func, wallet_client, fingerprint, wallet_id)
async def inspect_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
pool_wallet_info, unconfirmed_transactions = await wallet_client.pw_status(wallet_id)
print(
{
"pool_wallet_info": pool_wallet_info,
"unconfirmed_transactions": [
{"sent_to": tx.sent_to, "transaction_id": tx.name.hex()} for tx in unconfirmed_transactions
],
}
)
async def claim_cmd(args: dict, wallet_client: WalletRpcClient, fingerprint: int) -> None:
wallet_id = args.get("id", None)
fee = Decimal(args.get("fee", 0))
fee_seeds = uint64(int(fee * units["salvia"]))
msg = f"\nWill claim rewards for wallet ID: {wallet_id}."
func = functools.partial(
wallet_client.pw_absorb_rewards,
wallet_id,
fee_seeds,
)
await submit_tx_with_confirmation(msg, False, func, wallet_client, fingerprint, wallet_id)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/plotnft_funcs.py
| 0.559531 | 0.162879 |
plotnft_funcs.py
|
pypi
|
import click
@click.group("keys", short_help="Manage your keys")
@click.pass_context
def keys_cmd(ctx: click.Context):
"""Create, delete, view and use your key pairs"""
from pathlib import Path
root_path: Path = ctx.obj["root_path"]
if not root_path.is_dir():
raise RuntimeError("Please initialize (or migrate) your config directory with salvia init")
@keys_cmd.command("generate", short_help="Generates and adds a key to keychain")
@click.pass_context
def generate_cmd(ctx: click.Context):
from .init_funcs import check_keys
from .keys_funcs import generate_and_add
generate_and_add()
check_keys(ctx.obj["root_path"])
@keys_cmd.command("show", short_help="Displays all the keys in keychain")
@click.option(
"--show-mnemonic-seed", help="Show the mnemonic seed of the keys", default=False, show_default=True, is_flag=True
)
def show_cmd(show_mnemonic_seed):
from .keys_funcs import show_all_keys
show_all_keys(show_mnemonic_seed)
@keys_cmd.command("add", short_help="Add a private key by mnemonic")
@click.option(
"--filename",
"-f",
default=None,
help="The filename containing the secret key mnemonic to add",
type=str,
required=False,
)
@click.pass_context
def add_cmd(ctx: click.Context, filename: str):
from .init_funcs import check_keys
if filename:
from pathlib import Path
from .keys_funcs import add_private_key_seed
mnemonic = Path(filename).read_text().rstrip()
add_private_key_seed(mnemonic)
else:
from .keys_funcs import query_and_add_private_key_seed
query_and_add_private_key_seed()
check_keys(ctx.obj["root_path"])
@keys_cmd.command("delete", short_help="Delete a key by its pk fingerprint in hex form")
@click.option(
"--fingerprint",
"-f",
default=None,
help="Enter the fingerprint of the key you want to use",
type=int,
required=True,
)
@click.pass_context
def delete_cmd(ctx: click.Context, fingerprint: int):
from .init_funcs import check_keys
from .keys_funcs import delete
delete(fingerprint)
check_keys(ctx.obj["root_path"])
@keys_cmd.command("delete_all", short_help="Delete all private keys in keychain")
def delete_all_cmd():
from .keys_funcs import keychain
keychain.delete_all_keys()
@keys_cmd.command("generate_and_print", short_help="Generates but does NOT add to keychain")
def generate_and_print_cmd():
from .keys_funcs import generate_and_print
generate_and_print()
@keys_cmd.command("sign", short_help="Sign a message with a private key")
@click.option("--message", "-d", default=None, help="Enter the message to sign in UTF-8", type=str, required=True)
@click.option(
"--fingerprint",
"-f",
default=None,
help="Enter the fingerprint of the key you want to use",
type=int,
required=True,
)
@click.option("--hd_path", "-t", help="Enter the HD path in the form 'm/12381/8444/n/n'", type=str, required=True)
@click.option(
"--as-bytes",
"-b",
help="Sign the message as sequence of bytes rather than UTF-8 string",
default=False,
show_default=True,
is_flag=True,
)
def sign_cmd(message: str, fingerprint: int, hd_path: str, as_bytes: bool):
from .keys_funcs import sign
sign(message, fingerprint, hd_path, as_bytes)
@keys_cmd.command("verify", short_help="Verify a signature with a pk")
@click.option("--message", "-d", default=None, help="Enter the message to sign in UTF-8", type=str, required=True)
@click.option("--public_key", "-p", default=None, help="Enter the pk in hex", type=str, required=True)
@click.option("--signature", "-s", default=None, help="Enter the signature in hex", type=str, required=True)
def verify_cmd(message: str, public_key: str, signature: str):
from .keys_funcs import verify
verify(message, public_key, signature)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/keys.py
| 0.45302 | 0.248386 |
keys.py
|
pypi
|
from typing import List
from blspy import AugSchemeMPL, G1Element, G2Element
from salvia.consensus.coinbase import create_puzzlehash_for_pk
from salvia.util.bech32m import encode_puzzle_hash
from salvia.util.config import load_config
from salvia.util.default_root import DEFAULT_ROOT_PATH
from salvia.util.ints import uint32
from salvia.util.keychain import Keychain, bytes_to_mnemonic, generate_mnemonic, unlocks_keyring
from salvia.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_pool_sk, master_sk_to_wallet_sk
keychain: Keychain = Keychain()
def generate_and_print():
"""
Generates a seed for a private key, and prints the mnemonic to the terminal.
"""
mnemonic = generate_mnemonic()
print("Generating private key. Mnemonic (24 secret words):")
print(mnemonic)
print("Note that this key has not been added to the keychain. Run salvia keys add")
return mnemonic
@unlocks_keyring(use_passphrase_cache=True)
def generate_and_add():
"""
Generates a seed for a private key, prints the mnemonic to the terminal, and adds the key to the keyring.
"""
mnemonic = generate_mnemonic()
print("Generating private key")
add_private_key_seed(mnemonic)
@unlocks_keyring(use_passphrase_cache=True)
def query_and_add_private_key_seed():
mnemonic = input("Enter the mnemonic you want to use: ")
add_private_key_seed(mnemonic)
@unlocks_keyring(use_passphrase_cache=True)
def add_private_key_seed(mnemonic: str):
"""
Add a private key seed to the keyring, with the given mnemonic.
"""
try:
passphrase = ""
sk = keychain.add_private_key(mnemonic, passphrase)
fingerprint = sk.get_g1().get_fingerprint()
print(f"Added private key with public key fingerprint {fingerprint}")
except ValueError as e:
print(e)
return None
@unlocks_keyring(use_passphrase_cache=True)
def show_all_keys(show_mnemonic: bool):
"""
Prints all keys and mnemonics (if available).
"""
root_path = DEFAULT_ROOT_PATH
config = load_config(root_path, "config.yaml")
private_keys = keychain.get_all_private_keys()
selected = config["selected_network"]
prefix = config["network_overrides"]["config"][selected]["address_prefix"]
if len(private_keys) == 0:
print("There are no saved private keys")
return None
msg = "Showing all public keys derived from your master seed and private key:"
if show_mnemonic:
msg = "Showing all public and private keys"
print(msg)
for sk, seed in private_keys:
print("")
print("Fingerprint:", sk.get_g1().get_fingerprint())
print("Master public key (m):", sk.get_g1())
print(
"Farmer public key (m/12381/8444/0/0):",
master_sk_to_farmer_sk(sk).get_g1(),
)
print("Pool public key (m/12381/8444/1/0):", master_sk_to_pool_sk(sk).get_g1())
print(
"First wallet address:",
encode_puzzle_hash(create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(0)).get_g1()), prefix),
)
assert seed is not None
if show_mnemonic:
print("Master private key (m):", bytes(sk).hex())
print(
"First wallet secret key (m/12381/8444/2/0):",
master_sk_to_wallet_sk(sk, uint32(0)),
)
mnemonic = bytes_to_mnemonic(seed)
print(" Mnemonic seed (24 secret words):")
print(mnemonic)
@unlocks_keyring(use_passphrase_cache=True)
def delete(fingerprint: int):
"""
Delete a key by its public key fingerprint (which is an integer).
"""
print(f"Deleting private_key with fingerprint {fingerprint}")
keychain.delete_key_by_fingerprint(fingerprint)
@unlocks_keyring(use_passphrase_cache=True)
def sign(message: str, fingerprint: int, hd_path: str, as_bytes: bool):
k = Keychain()
private_keys = k.get_all_private_keys()
path: List[uint32] = [uint32(int(i)) for i in hd_path.split("/") if i != "m"]
for sk, _ in private_keys:
if sk.get_g1().get_fingerprint() == fingerprint:
for c in path:
sk = AugSchemeMPL.derive_child_sk(sk, c)
data = bytes.fromhex(message) if as_bytes else bytes(message, "utf-8")
print("Public key:", sk.get_g1())
print("Signature:", AugSchemeMPL.sign(sk, data))
return None
print(f"Fingerprint {fingerprint} not found in keychain")
def verify(message: str, public_key: str, signature: str):
messageBytes = bytes(message, "utf-8")
public_key = G1Element.from_bytes(bytes.fromhex(public_key))
signature = G2Element.from_bytes(bytes.fromhex(signature))
print(AugSchemeMPL.verify(public_key, messageBytes, signature))
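# Minimal usage sketch (illustration only, the fingerprint and hex strings below are placeholders):
# sign() derives a child key along the given HD path for the matching keychain entry and prints
# the public key and signature; verify() only needs those two printed hex values.
#
# sign("hello", 123456789, "m/12381/8444/2/0", as_bytes=False)
# verify("hello", "<hex public key printed by sign>", "<hex signature printed by sign>")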
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/keys_funcs.py
| 0.710528 | 0.405449 |
keys_funcs.py
|
pypi
|
from typing import Optional
import click
@click.group("farm", short_help="Manage your farm")
def farm_cmd() -> None:
pass
@farm_cmd.command("summary", short_help="Summary of farming information")
@click.option(
"-p",
"--rpc-port",
help=(
"Set the port where the Full Node is hosting the RPC interface. "
"See the rpc_port under full_node in config.yaml"
),
type=int,
default=None,
show_default=True,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
show_default=True,
)
@click.option(
"-hp",
"--harvester-rpc-port",
help=(
"Set the port where the Harvester is hosting the RPC interface"
"See the rpc_port under harvester in config.yaml"
),
type=int,
default=None,
show_default=True,
)
@click.option(
"-fp",
"--farmer-rpc-port",
help=(
"Set the port where the Farmer is hosting the RPC interface. " "See the rpc_port under farmer in config.yaml"
),
type=int,
default=None,
show_default=True,
)
def summary_cmd(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
harvester_rpc_port: Optional[int],
farmer_rpc_port: Optional[int],
) -> None:
from .farm_funcs import summary
import asyncio
asyncio.run(summary(rpc_port, wallet_rpc_port, harvester_rpc_port, farmer_rpc_port))
@farm_cmd.command("challenges", short_help="Show the latest challenges")
@click.option(
"-fp",
"--farmer-rpc-port",
help="Set the port where the Farmer is hosting the RPC interface. See the rpc_port under farmer in config.yaml",
type=int,
default=None,
show_default=True,
)
@click.option(
"-l",
"--limit",
help="Limit the number of challenges shown. Use 0 to disable the limit",
type=click.IntRange(0),
default=20,
show_default=True,
)
def challenges_cmd(farmer_rpc_port: Optional[int], limit: int) -> None:
from .farm_funcs import challenges
import asyncio
asyncio.run(challenges(farmer_rpc_port, limit))
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/farm.py
| 0.754644 | 0.236439 |
farm.py
|
pypi
|
import click
from salvia.util.keychain import supports_keyring_passphrase
@click.command("init", short_help="Create or migrate the configuration")
@click.option(
"--create-certs",
"-c",
default=None,
help="Create new SSL certificates based on CA in [directory]",
type=click.Path(),
)
@click.option(
"--fix-ssl-permissions",
is_flag=True,
help="Attempt to fix SSL certificate/key file permissions",
)
@click.option("--testnet", is_flag=True, help="Configure this salvia install to connect to the testnet")
@click.option("--set-passphrase", "-s", is_flag=True, help="Protect your keyring with a passphrase")
@click.pass_context
def init_cmd(ctx: click.Context, create_certs: str, fix_ssl_permissions: bool, testnet: bool, **kwargs):
"""
Create a new configuration or migrate from previous versions to current
\b
Follow these steps to create new certificates for a remote harvester:
- Make a copy of your Farming Machine CA directory: ~/.salvia/[version]/config/ssl/ca
- Shut down all salvia daemon processes with `salvia stop all -d`
- Run `salvia init -c [directory]` on your remote harvester,
where [directory] is the copy of your Farming Machine CA directory
- Get more details on remote harvester on Salvia wiki:
https://github.com/Salvia-Network/salvia-blockchain/wiki/Farming-on-many-machines
"""
from pathlib import Path
from .init_funcs import init
from salvia.cmds.passphrase_funcs import initialize_passphrase
set_passphrase = kwargs.get("set_passphrase")
if set_passphrase:
initialize_passphrase()
init(Path(create_certs) if create_certs is not None else None, ctx.obj["root_path"], fix_ssl_permissions, testnet)
if not supports_keyring_passphrase():
from salvia.cmds.passphrase_funcs import remove_passphrase_options_from_cmd
# TODO: Remove once keyring passphrase management is rolled out to all platforms
remove_passphrase_options_from_cmd(init_cmd)
if __name__ == "__main__":
from .init_funcs import salvia_init
from salvia.util.default_root import DEFAULT_ROOT_PATH
salvia_init(DEFAULT_ROOT_PATH)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/init.py
| 0.423577 | 0.173288 |
init.py
|
pypi
|
from decimal import Decimal, InvalidOperation
from typing import Optional
import click
MAX_CMDLINE_FEE = Decimal(0.5)
def validate_fee(ctx, param, value):
try:
fee = Decimal(value)
except (ValueError, InvalidOperation):
raise click.BadParameter("Fee must be decimal dotted value in XSLV (e.g. 0.00005)")
if fee < 0 or fee > MAX_CMDLINE_FEE:
raise click.BadParameter(f"Fee must be in the range 0 to {MAX_CMDLINE_FEE}")
return value
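# Usage sketch: validate_fee is written as a click callback, but it can be exercised directly.
# It returns the original string on success and raises click.BadParameter otherwise; the ctx and
# param arguments are unused, so None stands in for them here.
#
# validate_fee(None, None, "0.00005")  # returns "0.00005"
# validate_fee(None, None, "2")        # raises click.BadParameter (exceeds MAX_CMDLINE_FEE)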
@click.group("plotnft", short_help="Manage your plot NFTs")
def plotnft_cmd() -> None:
pass
@plotnft_cmd.command("show", short_help="Show plotnft information")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=False)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def show_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import show
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, {"id": id}, show))
@plotnft_cmd.command(
"get_login_link", short_help="Create a login link for a pool. To get the launcher id, use plotnft show."
)
@click.option("-l", "--launcher_id", help="Launcher ID of the plotnft", type=str, required=True)
def get_login_link_cmd(launcher_id: str) -> None:
import asyncio
from .plotnft_funcs import get_login_link
asyncio.run(get_login_link(launcher_id))
@plotnft_cmd.command("create", short_help="Create a plot NFT")
@click.option("-y", "--yes", help="No prompts", is_flag=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-u", "--pool_url", help="HTTPS host:port of the pool to join", type=str, required=False)
@click.option("-s", "--state", help="Initial state of Plot NFT: local or pool", type=str, required=True)
@click.option(
"-m",
"--fee",
help="Set the fees per transaction, in XSLV. Fee is used TWICE: once to create the singleton, once for init.",
type=str,
default="0",
show_default=True,
required=True,
callback=validate_fee,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
def create_cmd(
wallet_rpc_port: Optional[int], fingerprint: int, pool_url: str, state: str, fee: int, yes: bool
) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import create
if pool_url is not None and state.lower() == "local":
print(f" pool_url argument [{pool_url}] is not allowed when creating in 'local' state")
return
if pool_url in [None, ""] and state.lower() == "pool":
print(" pool_url argument (-u) is required for pool starting state")
return
valid_initial_states = {"pool": "FARMING_TO_POOL", "local": "SELF_POOLING"}
extra_params = {"pool_url": pool_url, "state": valid_initial_states[state], "fee": fee, "yes": yes}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, create))
@plotnft_cmd.command("join", short_help="Join a plot NFT to a Pool")
@click.option("-y", "--yes", help="No prompts", is_flag=True)
@click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-u", "--pool_url", help="HTTPS host:port of the pool to join", type=str, required=True)
@click.option(
"-m",
"--fee",
help="Set the fees per transaction, in XSLV. Fee is used TWICE: once to leave pool, once to join.",
type=str,
default="0",
show_default=True,
required=True,
callback=validate_fee,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
def join_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int, pool_url: str, yes: bool) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import join_pool
extra_params = {"pool_url": pool_url, "id": id, "fee": fee, "yes": yes}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, join_pool))
@plotnft_cmd.command("leave", short_help="Leave a pool and return to self-farming")
@click.option("-y", "--yes", help="No prompts", is_flag=True)
@click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option(
"-m",
"--fee",
help="Set the fees per transaction, in XSLV. Fee is charged TWICE.",
type=str,
default="0",
show_default=True,
required=True,
callback=validate_fee,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
def self_pool_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int, yes: bool) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import self_pool
extra_params = {"id": id, "fee": fee, "yes": yes}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, self_pool))
@plotnft_cmd.command("inspect", short_help="Get Detailed plotnft information as JSON")
@click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
def inspect(wallet_rpc_port: Optional[int], fingerprint: int, id: int) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import inspect_cmd
extra_params = {"id": id}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, inspect_cmd))
@plotnft_cmd.command("claim", short_help="Claim rewards from a plot NFT")
@click.option("-i", "--id", help="ID of the wallet to use", type=int, default=None, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option(
"-m",
"--fee",
help="Set the fees per transaction, in XSLV.",
type=str,
default="0",
show_default=True,
required=True,
callback=validate_fee,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
def claim(wallet_rpc_port: Optional[int], fingerprint: int, id: int, fee: int) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet
from .plotnft_funcs import claim_cmd
extra_params = {"id": id, "fee": fee}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, claim_cmd))
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/cmds/plotnft.py
| 0.719482 | 0.291737 |
plotnft.py
|
pypi
|
import asyncio
import json
import traceback
import os
import sys
import logging
from pathlib import Path
from typing import Any, Dict, Optional, Tuple
from salvia.plotting.create_plots import resolve_plot_keys
from salvia.plotters.plotters_util import run_plotter, run_command
log = logging.getLogger(__name__)
BLADEBIT_PLOTTER_DIR = "bladebit"
def is_bladebit_supported() -> bool:
return sys.platform.startswith("linux") or sys.platform in ["win32", "cygwin"]
def meets_memory_requirement(plotters_root_path: Path) -> Tuple[bool, Optional[str]]:
have_enough_memory: bool = False
warning_string: Optional[str] = None
if get_bladebit_executable_path(plotters_root_path).exists():
try:
proc = run_command(
[os.fspath(get_bladebit_executable_path(plotters_root_path)), "--memory-json"],
"Failed to call bladebit with --memory-json option",
capture_output=True,
text=True,
)
memory_info: Dict[str, int] = json.loads(proc.stdout)
total_bytes: int = memory_info.get("total", -1)
required_bytes: int = memory_info.get("required", 0)
have_enough_memory = total_bytes >= required_bytes
if have_enough_memory is False:
warning_string = f"BladeBit requires at least {int(required_bytes / 1024**3)} GiB of RAM to operate"
except Exception as e:
print(f"Failed to determine bladebit memory requirements: {e}")
return have_enough_memory, warning_string
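# Illustrative only: the parsing above expects `bladebit --memory-json` to print a JSON object
# with at least "total" and "required" byte counts, for example (hypothetical numbers):
#
#   {"total": 471185317888, "required": 445568630784}
#
# have_enough_memory is then simply total >= required.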
def get_bladebit_install_path(plotters_root_path: Path) -> Path:
return plotters_root_path / BLADEBIT_PLOTTER_DIR
def get_bladebit_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "bladebit"
def get_bladebit_executable_path(plotters_root_path: Path) -> Path:
bladebit_dir: Path = get_bladebit_package_path()
bladebit_exec: str = "bladebit"
build_dir: str = "build"
if sys.platform in ["win32", "cygwin"]:
bladebit_exec = "bladebit.exe"
build_dir = "build/Release"
if not bladebit_dir.exists():
bladebit_dir = get_bladebit_install_path(plotters_root_path) / build_dir
return bladebit_dir / bladebit_exec
def get_bladebit_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
info: Dict[str, Any] = {"display_name": "BladeBit Plotter"}
installed: bool = False
supported: bool = is_bladebit_supported()
if get_bladebit_executable_path(plotters_root_path).exists():
version: Optional[str] = None
try:
proc = run_command(
[os.fspath(get_bladebit_executable_path(plotters_root_path)), "--version"],
"Failed to call bladebit with --version option",
capture_output=True,
text=True,
)
version = proc.stdout.strip()
except Exception as e:
print(f"Failed to determine bladebit version: {e}")
if version is not None:
installed = True
info["version"] = version
else:
installed = False
info["installed"] = installed
if installed is False:
info["can_install"] = supported
if supported:
_, memory_warning = meets_memory_requirement(plotters_root_path)
if memory_warning is not None:
info["bladebit_memory_warning"] = memory_warning
return info
progress = {
"Finished F1 sort": 0.01,
"Finished forward propagating table 2": 0.06,
"Finished forward propagating table 3": 0.12,
"Finished forward propagating table 4": 0.2,
"Finished forward propagating table 5": 0.28,
"Finished forward propagating table 6": 0.36,
"Finished forward propagating table 7": 0.42,
"Finished prunning table 6": 0.43,
"Finished prunning table 5": 0.48,
"Finished prunning table 4": 0.51,
"Finished prunning table 3": 0.55,
"Finished prunning table 2": 0.58,
"Finished compressing tables 1 and 2": 0.66,
"Finished compressing tables 2 and 3": 0.73,
"Finished compressing tables 3 and 4": 0.79,
"Finished compressing tables 4 and 5": 0.85,
"Finished compressing tables 5 and 6": 0.92,
"Finished compressing tables 6 and 7": 0.98,
}
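# Presumed usage (not verified here): run_plotter appears to scan the plotter's output for the
# substrings above and report the mapped value as the overall completion fraction. The actual
# behavior lives in salvia/plotters/plotters_util.py, which is outside this file.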
def install_bladebit(root_path):
if is_bladebit_supported():
print("Installing dependencies.")
run_command(
[
"sudo",
"apt",
"install",
"-y",
"build-essential",
"cmake",
"libnuma-dev",
"git",
],
"Could not install dependencies",
)
print("Cloning repository and its submodules.")
run_command(
[
"git",
"clone",
"--recursive",
"https://github.com/Salvia-Network/bladebit.git",
],
"Could not clone bladebit repository",
cwd=os.fspath(root_path),
)
bladebit_path: str = os.fspath(root_path.joinpath("bladebit"))
build_path: str = os.fspath(Path(bladebit_path) / "build")
print("Build bladebit.")
run_command(["mkdir", build_path], "Failed to create build directory", cwd=bladebit_path)
run_command(["cmake", ".."], "Failed to generate build config", cwd=build_path)
run_command(
["cmake", "--build", ".", "--target", "bladebit", "--config", "Release"],
"Building bladebit failed",
cwd=build_path,
)
else:
raise RuntimeError("Platform not supported yet for bladebit plotter.")
def plot_bladebit(args, salvia_root_path, root_path):
if not os.path.exists(get_bladebit_executable_path(root_path)):
print("Installing bladebit plotter.")
try:
install_bladebit(root_path)
except Exception as e:
print(f"Exception while installing bladebit plotter: {e}")
return
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
None if args.farmerkey == b"" else args.farmerkey.hex(),
None,
None if args.pool_key == b"" else args.pool_key.hex(),
None if args.contract == "" else args.contract,
salvia_root_path,
log,
args.connect_to_daemon,
)
)
call_args = []
call_args.append(os.fspath(get_bladebit_executable_path(root_path)))
call_args.append("-t")
call_args.append(str(args.threads))
call_args.append("-n")
call_args.append(str(args.count))
call_args.append("-f")
call_args.append(bytes(plot_keys.farmer_public_key).hex())
if plot_keys.pool_public_key is not None:
call_args.append("-p")
call_args.append(bytes(plot_keys.pool_public_key).hex())
if plot_keys.pool_contract_address is not None:
call_args.append("-c")
call_args.append(plot_keys.pool_contract_address)
if args.warmstart:
call_args.append("-w")
if args.id is not None and args.id != b"":
call_args.append("-i")
call_args.append(args.id.hex())
if args.verbose:
call_args.append("-v")
if args.nonuma:
call_args.append("-m")
call_args.append(args.finaldir)
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_plotter(call_args, progress))
except Exception as e:
print(f"Exception while plotting: {e} {type(e)}")
print(f"Traceback: {traceback.format_exc()}")
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/plotters/bladebit.py
| 0.439627 | 0.187914 |
bladebit.py
|
pypi
|
import asyncio
import traceback
import os
import logging
import sys
from pathlib import Path
from typing import Any, Dict, Optional
from salvia.plotting.create_plots import resolve_plot_keys
from salvia.plotters.plotters_util import run_plotter, run_command
log = logging.getLogger(__name__)
MADMAX_PLOTTER_DIR = "madmax-plotter"
def is_madmax_supported() -> bool:
return sys.platform.startswith("linux") or sys.platform in ["darwin", "win32", "cygwin"]
def get_madmax_install_path(plotters_root_path: Path) -> Path:
return plotters_root_path / MADMAX_PLOTTER_DIR
def get_madmax_package_path() -> Path:
return Path(os.path.dirname(sys.executable)) / "madmax"
def get_madmax_executable_path_for_ksize(plotters_root_path: Path, ksize: int = 32) -> Path:
madmax_dir: Path = get_madmax_package_path()
madmax_exec: str = "salvia_plot"
if ksize > 32:
madmax_exec += "_k34" # Use the salvia_plot_k34 executable for k-sizes > 32
if sys.platform in ["win32", "cygwin"]:
madmax_exec += ".exe"
if not madmax_dir.exists():
madmax_dir = get_madmax_install_path(plotters_root_path) / "build"
return madmax_dir / madmax_exec
def get_madmax_install_info(plotters_root_path: Path) -> Optional[Dict[str, Any]]:
info: Dict[str, Any] = {"display_name": "madMAx Plotter"}
installed: bool = False
supported: bool = is_madmax_supported()
if get_madmax_executable_path_for_ksize(plotters_root_path).exists():
    version: Optional[str] = None
    try:
proc = run_command(
[os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path)), "--version"],
"Failed to call madmax with --version option",
capture_output=True,
text=True,
)
version = proc.stdout.strip()
except Exception as e:
print(f"Failed to determine madmax version: {e}")
if version is not None:
installed = True
info["version"] = version
else:
installed = False
info["installed"] = installed
if installed is False:
info["can_install"] = supported
return info
def install_madmax(plotters_root_path: Path):
if is_madmax_supported():
print("Installing dependencies.")
if sys.platform.startswith("linux"):
run_command(
[
"sudo",
"apt",
"install",
"-y",
"libsodium-dev",
"cmake",
"g++",
"git",
"build-essential",
],
"Could not install dependencies",
)
if sys.platform.startswith("darwin"):
run_command(
[
"brew",
"install",
"libsodium",
"cmake",
"git",
"autoconf",
"automake",
"libtool",
"wget",
],
"Could not install dependencies",
)
run_command(["git", "--version"], "Error checking Git version.")
print("Cloning git repository.")
run_command(
[
"git",
"clone",
"https://github.com/Salvia-Network/salvia-plotter-madmax.git",
MADMAX_PLOTTER_DIR,
],
"Could not clone madmax git repository",
cwd=os.fspath(plotters_root_path),
)
print("Installing git submodules.")
madmax_path: str = os.fspath(get_madmax_install_path(plotters_root_path))
run_command(
[
"git",
"submodule",
"update",
"--init",
"--recursive",
],
"Could not initialize git submodules",
cwd=madmax_path,
)
print("Running install script.")
run_command(["./make_devel.sh"], "Error while running install script", cwd=madmax_path)
else:
raise RuntimeError("Platform not supported yet for madmax plotter.")
progress = {
"[P1] Table 1 took": 0.01,
"[P1] Table 2 took": 0.06,
"[P1] Table 3 took": 0.12,
"[P1] Table 4 took": 0.2,
"[P1] Table 5 took": 0.28,
"[P1] Table 6 took": 0.36,
"[P1] Table 7 took": 0.42,
"[P2] Table 7 rewrite took": 0.43,
"[P2] Table 6 rewrite took": 0.48,
"[P2] Table 5 rewrite took": 0.51,
"[P2] Table 4 rewrite took": 0.55,
"[P2] Table 3 rewrite took": 0.58,
"[P2] Table 2 rewrite took": 0.61,
"[P3-2] Table 2 took": 0.66,
"[P3-2] Table 3 took": 0.73,
"[P3-2] Table 4 took": 0.79,
"[P3-2] Table 5 took": 0.85,
"[P3-2] Table 6 took": 0.92,
"[P3-2] Table 7 took": 0.98,
}
def dir_with_trailing_slash(dir: str) -> str:
return dir if dir[-1] == os.path.sep else dir + os.path.sep
def plot_madmax(args, salvia_root_path: Path, plotters_root_path: Path):
if sys.platform not in ["win32", "cygwin"]:
import resource
# madMAx has a ulimit -n requirement > 296:
# "Cannot open at least 296 files, please raise maximum open file limit in OS."
resource.setrlimit(resource.RLIMIT_NOFILE, (512, 512))
if not os.path.exists(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)):
print("Installing madmax plotter.")
try:
install_madmax(plotters_root_path)
except Exception as e:
print(f"Exception while installing madmax plotter: {e}")
return
plot_keys = asyncio.get_event_loop().run_until_complete(
resolve_plot_keys(
None if args.farmerkey == b"" else args.farmerkey.hex(),
None,
None if args.pool_key == b"" else args.pool_key.hex(),
None if args.contract == "" else args.contract,
salvia_root_path,
log,
args.connect_to_daemon,
)
)
call_args = []
call_args.append(os.fspath(get_madmax_executable_path_for_ksize(plotters_root_path, args.size)))
call_args.append("-f")
call_args.append(bytes(plot_keys.farmer_public_key).hex())
if plot_keys.pool_public_key is not None:
call_args.append("-p")
call_args.append(bytes(plot_keys.pool_public_key).hex())
call_args.append("-t")
# s if s[-1] == os.path.sep else s + os.path.sep
call_args.append(dir_with_trailing_slash(args.tmpdir))
call_args.append("-2")
call_args.append(dir_with_trailing_slash(args.tmpdir2))
call_args.append("-d")
call_args.append(dir_with_trailing_slash(args.finaldir))
if plot_keys.pool_contract_address is not None:
call_args.append("-c")
call_args.append(plot_keys.pool_contract_address)
call_args.append("-n")
call_args.append(str(args.count))
call_args.append("-r")
call_args.append(str(args.threads))
call_args.append("-u")
call_args.append(str(args.buckets))
call_args.append("-v")
call_args.append(str(args.buckets3))
call_args.append("-w")
call_args.append(str(int(args.waitforcopy)))
call_args.append("-K")
call_args.append(str(args.rmulti2))
if args.size != 32:
call_args.append("-k")
call_args.append(str(args.size))
try:
loop = asyncio.get_event_loop()
loop.run_until_complete(run_plotter(call_args, progress))
except Exception as e:
print(f"Exception while plotting: {type(e)} {e}")
print(f"Traceback: {traceback.format_exc()}")
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/plotters/madmax.py
| 0.484624 | 0.173638 |
madmax.py
|
pypi
|
import logging
from dataclasses import dataclass
from enum import Enum
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
from blspy import G1Element, PrivateKey
from chiapos import DiskProver
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.config import load_config, save_config
log = logging.getLogger(__name__)
@dataclass
class PlotsRefreshParameter:
interval_seconds: int = 120
retry_invalid_seconds: int = 1200
batch_size: int = 300
batch_sleep_milliseconds: int = 1
@dataclass
class PlotInfo:
prover: DiskProver
pool_public_key: Optional[G1Element]
pool_contract_puzzle_hash: Optional[bytes32]
plot_public_key: G1Element
file_size: int
time_modified: float
class PlotRefreshEvents(Enum):
"""
These are the events the `PlotManager` will trigger with the callback during a full refresh cycle:
- started: This event indicates the start of a refresh cycle and contains the total number of files to
process in `PlotRefreshResult.remaining`.
- batch_processed: This event gets triggered if one batch has been processed. The values of
`PlotRefreshResult.{loaded|removed|processed}` are the results of this specific batch.
- done: This event gets triggered after all batches have been processed. The values of
`PlotRefreshResult.{loaded|removed|processed}` are the totals of all batches.
Note: The values of `PlotRefreshResult.{remaining|duration}` have the same meaning for all events.
"""
started = 0
batch_processed = 1
done = 2
@dataclass
class PlotRefreshResult:
loaded: int = 0
removed: int = 0
processed: int = 0
remaining: int = 0
duration: float = 0
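# A minimal sketch (not part of the original module) of a callback that could consume the refresh
# events documented above. The (event, result) signature is an assumption drawn from the
# PlotRefreshEvents docstring, not a documented PlotManager API.
def example_refresh_callback(event: PlotRefreshEvents, result: PlotRefreshResult) -> None:
    if event == PlotRefreshEvents.started:
        log.info(f"refresh started, {result.remaining} files to process")
    elif event == PlotRefreshEvents.batch_processed:
        log.info(f"batch processed: loaded={result.loaded} removed={result.removed} processed={result.processed}")
    elif event == PlotRefreshEvents.done:
        log.info(f"refresh done in {result.duration:.1f}s, {result.loaded} plots loaded in total")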
def get_plot_directories(root_path: Path, config: Optional[Dict] = None) -> List[str]:
if config is None:
config = load_config(root_path, "config.yaml")
return config["harvester"]["plot_directories"]
def get_plot_filenames(root_path: Path) -> Dict[Path, List[Path]]:
# Returns a map from directory to a list of all plots in the directory
all_files: Dict[Path, List[Path]] = {}
for directory_name in get_plot_directories(root_path):
directory = Path(directory_name).resolve()
all_files[directory] = get_filenames(directory)
return all_files
def add_plot_directory(root_path: Path, str_path: str) -> Dict:
log.debug(f"add_plot_directory {str_path}")
config = load_config(root_path, "config.yaml")
if str(Path(str_path).resolve()) not in get_plot_directories(root_path, config):
config["harvester"]["plot_directories"].append(str(Path(str_path).resolve()))
save_config(root_path, "config.yaml", config)
return config
def remove_plot_directory(root_path: Path, str_path: str) -> None:
log.debug(f"remove_plot_directory {str_path}")
config = load_config(root_path, "config.yaml")
str_paths: List[str] = get_plot_directories(root_path, config)
# If path str matches exactly, remove
if str_path in str_paths:
str_paths.remove(str_path)
# If path matches full path, remove
new_paths = [Path(sp).resolve() for sp in str_paths]
if Path(str_path).resolve() in new_paths:
new_paths.remove(Path(str_path).resolve())
config["harvester"]["plot_directories"] = [str(np) for np in new_paths]
save_config(root_path, "config.yaml", config)
def remove_plot(path: Path):
log.debug(f"remove_plot {str(path)}")
# Remove absolute and relative paths
if path.exists():
path.unlink()
def get_filenames(directory: Path) -> List[Path]:
try:
if not directory.exists():
log.warning(f"Directory: {directory} does not exist.")
return []
except OSError as e:
log.warning(f"Error checking if directory {directory} exists: {e}")
return []
all_files: List[Path] = []
try:
for child in directory.iterdir():
if not child.is_dir():
# If it is a file ending in .plot, add it - work around MacOS ._ files
if child.suffix == ".plot" and not child.name.startswith("._"):
all_files.append(child)
else:
log.debug(f"Not checking subdirectory {child}, subdirectories not added by default")
except Exception as e:
log.warning(f"Error reading directory {directory} {e}")
return all_files
def parse_plot_info(memo: bytes) -> Tuple[Union[G1Element, bytes32], G1Element, PrivateKey]:
# Parses the plot info bytes into keys
if len(memo) == (48 + 48 + 32):
# This is a public key memo
return (
G1Element.from_bytes(memo[:48]),
G1Element.from_bytes(memo[48:96]),
PrivateKey.from_bytes(memo[96:]),
)
elif len(memo) == (32 + 48 + 32):
# This is a pool_contract_puzzle_hash memo
return (
bytes32(memo[:32]),
G1Element.from_bytes(memo[32:80]),
PrivateKey.from_bytes(memo[80:]),
)
else:
raise ValueError(f"Invalid number of bytes {len(memo)}")
def stream_plot_info_pk(
pool_public_key: G1Element,
farmer_public_key: G1Element,
local_master_sk: PrivateKey,
):
# There are two ways to stream plot info: with a pool public key, or with a pool contract puzzle hash.
# This one streams the public key, into bytes
data = bytes(pool_public_key) + bytes(farmer_public_key) + bytes(local_master_sk)
assert len(data) == (48 + 48 + 32)
return data
def stream_plot_info_ph(
pool_contract_puzzle_hash: bytes32,
farmer_public_key: G1Element,
local_master_sk: PrivateKey,
):
# There are two ways to stream plot info: with a pool public key, or with a pool contract puzzle hash.
# This one streams the pool contract puzzle hash, into bytes
data = pool_contract_puzzle_hash + bytes(farmer_public_key) + bytes(local_master_sk)
assert len(data) == (32 + 48 + 32)
return data
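# Round-trip sketch (illustration only): a memo produced by either stream_plot_info_* helper can
# be decoded again with parse_plot_info. The keys below are freshly generated throwaway keys.
#
# from secrets import token_bytes
# from blspy import AugSchemeMPL
# sk = AugSchemeMPL.key_gen(token_bytes(32))
# memo = stream_plot_info_pk(sk.get_g1(), sk.get_g1(), sk)    # 48 + 48 + 32 bytes
# pool_pk, farmer_pk, local_sk = parse_plot_info(memo)        # (G1Element, G1Element, PrivateKey)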
def find_duplicate_plot_IDs(all_filenames=None) -> None:
if all_filenames is None:
all_filenames = []
plot_ids_set = set()
duplicate_plot_ids = set()
all_filenames_str: List[str] = []
for filename in all_filenames:
filename_str: str = str(filename)
all_filenames_str.append(filename_str)
filename_parts: List[str] = filename_str.split("-")
plot_id: str = filename_parts[-1]
# Skipped parsing and verifying plot ID for faster performance
# Skipped checking K size for faster performance
# Only checks end of filenames: 64 char plot ID + .plot = 69 characters
if len(plot_id) == 69:
if plot_id in plot_ids_set:
duplicate_plot_ids.add(plot_id)
else:
plot_ids_set.add(plot_id)
else:
log.warning(f"{filename} does not end with -[64 char plot ID].plot")
for plot_id in duplicate_plot_ids:
log_message: str = plot_id + " found in multiple files:\n"
duplicate_filenames: List[str] = [filename_str for filename_str in all_filenames_str if plot_id in filename_str]
for filename_str in duplicate_filenames:
log_message += "\t" + filename_str + "\n"
log.warning(f"{log_message}")
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/plotting/util.py
| 0.858926 | 0.335405 |
util.py
|
pypi
|
import logging
from datetime import datetime
from pathlib import Path
from secrets import token_bytes
from typing import Dict, List, Optional, Tuple
from blspy import AugSchemeMPL, G1Element, PrivateKey
from chiapos import DiskPlotter
from salvia.daemon.keychain_proxy import KeychainProxy, connect_to_keychain_and_validate, wrap_local_keychain
from salvia.plotting.util import add_plot_directory
from salvia.plotting.util import stream_plot_info_ph, stream_plot_info_pk
from salvia.types.blockchain_format.proof_of_space import ProofOfSpace
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.bech32m import decode_puzzle_hash
from salvia.util.config import config_path_for_filename, load_config
from salvia.util.keychain import Keychain
from salvia.util.path import mkdir
from salvia.wallet.derive_keys import master_sk_to_farmer_sk, master_sk_to_local_sk, master_sk_to_pool_sk
log = logging.getLogger(__name__)
class PlotKeys:
def __init__(
self,
farmer_public_key: G1Element,
pool_public_key: Optional[G1Element],
pool_contract_address: Optional[str],
):
self.farmer_public_key = farmer_public_key
self.pool_public_key = pool_public_key
self.pool_contract_address = pool_contract_address
@property
def pool_contract_puzzle_hash(self) -> Optional[bytes32]:
if self.pool_contract_address is not None:
return decode_puzzle_hash(self.pool_contract_address)
return None
class PlotKeysResolver:
def __init__(
self,
farmer_public_key: str,
alt_fingerprint: int,
pool_public_key: str,
pool_contract_address: str,
root_path: Path,
log: logging.Logger,
connect_to_daemon=False,
):
self.farmer_public_key = farmer_public_key
self.alt_fingerprint = alt_fingerprint
self.pool_public_key = pool_public_key
self.pool_contract_address = pool_contract_address
self.root_path = root_path
self.log = log
self.connect_to_daemon = connect_to_daemon
self.resolved_keys: Optional[PlotKeys] = None
async def resolve(self) -> PlotKeys:
if self.resolved_keys is not None:
return self.resolved_keys
keychain_proxy: Optional[KeychainProxy] = None
if self.connect_to_daemon:
keychain_proxy = await connect_to_keychain_and_validate(self.root_path, self.log)
else:
keychain_proxy = wrap_local_keychain(Keychain(), log=self.log)
farmer_public_key: G1Element
if self.farmer_public_key is not None:
farmer_public_key = G1Element.from_bytes(bytes.fromhex(self.farmer_public_key))
else:
farmer_public_key = await self.get_farmer_public_key(keychain_proxy)
pool_public_key: Optional[G1Element] = None
if self.pool_public_key is not None:
if self.pool_contract_address is not None:
raise RuntimeError("Choose one of pool_contract_address and pool_public_key")
pool_public_key = G1Element.from_bytes(bytes.fromhex(self.pool_public_key))
else:
if self.pool_contract_address is None:
# If nothing is set, farms to the provided key (or the first key)
pool_public_key = await self.get_pool_public_key(keychain_proxy)
self.resolved_keys = PlotKeys(farmer_public_key, pool_public_key, self.pool_contract_address)
return self.resolved_keys
async def get_sk(self, keychain_proxy: Optional[KeychainProxy] = None) -> Optional[Tuple[PrivateKey, bytes]]:
sk: Optional[PrivateKey] = None
if keychain_proxy:
try:
if self.alt_fingerprint is not None:
sk = await keychain_proxy.get_key_for_fingerprint(self.alt_fingerprint)
else:
sk = await keychain_proxy.get_first_private_key()
except Exception as e:
log.error(f"Keychain proxy failed with error: {e}")
else:
sk_ent: Optional[Tuple[PrivateKey, bytes]] = None
keychain: Keychain = Keychain()
if self.alt_fingerprint is not None:
sk_ent = keychain.get_private_key_by_fingerprint(self.alt_fingerprint)
else:
sk_ent = keychain.get_first_private_key()
if sk_ent:
sk = sk_ent[0]
return sk
async def get_farmer_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
if sk is None:
raise RuntimeError(
"No keys, please run 'salvia keys add', 'salvia keys generate' or provide a public key with -f"
)
return master_sk_to_farmer_sk(sk).get_g1()
async def get_pool_public_key(self, keychain_proxy: Optional[KeychainProxy] = None) -> G1Element:
sk: Optional[PrivateKey] = await self.get_sk(keychain_proxy)
if sk is None:
raise RuntimeError(
"No keys, please run 'salvia keys add', 'salvia keys generate' or provide a public key with -p"
)
return master_sk_to_pool_sk(sk).get_g1()
async def resolve_plot_keys(
farmer_public_key: str,
alt_fingerprint: int,
pool_public_key: str,
pool_contract_address: str,
root_path: Path,
log: logging.Logger,
connect_to_daemon=False,
) -> PlotKeys:
return await PlotKeysResolver(
farmer_public_key, alt_fingerprint, pool_public_key, pool_contract_address, root_path, log, connect_to_daemon
).resolve()
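# Hedged usage sketch: this is how the plotter wrappers (e.g. plot_bladebit, plot_madmax) obtain
# keys before building their command lines. With all key arguments None, the first key in the
# keychain is used. DEFAULT_ROOT_PATH would need to be imported from salvia.util.default_root;
# values below are placeholders.
#
# import asyncio
# plot_keys = asyncio.get_event_loop().run_until_complete(
#     resolve_plot_keys(None, None, None, None, DEFAULT_ROOT_PATH, log, connect_to_daemon=False)
# )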
async def create_plots(
args, keys: PlotKeys, root_path, use_datetime=True, test_private_keys: Optional[List] = None
) -> Tuple[Dict[bytes32, Path], Dict[bytes32, Path]]:
config_filename = config_path_for_filename(root_path, "config.yaml")
config = load_config(root_path, config_filename)
if args.tmp2_dir is None:
args.tmp2_dir = args.tmp_dir
assert (keys.pool_public_key is None) != (keys.pool_contract_puzzle_hash is None)
num = args.num
if args.size < config["min_mainnet_k_size"] and test_private_keys is None:
log.warning(f"Creating plots with size k={args.size}, which is less than the minimum required for mainnet")
if args.size < 22:
log.warning("k under 22 is not supported. Increasing k to 22")
args.size = 22
if keys.pool_public_key is not None:
log.info(
f"Creating {num} plots of size {args.size}, pool public key: "
f"{bytes(keys.pool_public_key).hex()} farmer public key: {bytes(keys.farmer_public_key).hex()}"
)
else:
assert keys.pool_contract_puzzle_hash is not None
log.info(
f"Creating {num} plots of size {args.size}, pool contract address: "
f"{keys.pool_contract_address} farmer public key: {bytes(keys.farmer_public_key).hex()}"
)
tmp_dir_created = False
if not args.tmp_dir.exists():
mkdir(args.tmp_dir)
tmp_dir_created = True
tmp2_dir_created = False
if not args.tmp2_dir.exists():
mkdir(args.tmp2_dir)
tmp2_dir_created = True
mkdir(args.final_dir)
created_plots: Dict[bytes32, Path] = {}
existing_plots: Dict[bytes32, Path] = {}
for i in range(num):
# Generate a random master secret key
if test_private_keys is not None:
assert len(test_private_keys) == num
sk: PrivateKey = test_private_keys[i]
else:
sk = AugSchemeMPL.key_gen(token_bytes(32))
# The plot public key is the combination of the harvester and farmer keys
# New plots will also include a taproot of the keys, for extensibility
include_taproot: bool = keys.pool_contract_puzzle_hash is not None
plot_public_key = ProofOfSpace.generate_plot_public_key(
master_sk_to_local_sk(sk).get_g1(), keys.farmer_public_key, include_taproot
)
# The plot id is based on the harvester, farmer, and pool keys
if keys.pool_public_key is not None:
plot_id: bytes32 = ProofOfSpace.calculate_plot_id_pk(keys.pool_public_key, plot_public_key)
plot_memo: bytes32 = stream_plot_info_pk(keys.pool_public_key, keys.farmer_public_key, sk)
else:
assert keys.pool_contract_puzzle_hash is not None
plot_id = ProofOfSpace.calculate_plot_id_ph(keys.pool_contract_puzzle_hash, plot_public_key)
plot_memo = stream_plot_info_ph(keys.pool_contract_puzzle_hash, keys.farmer_public_key, sk)
if args.plotid is not None:
log.info(f"Debug plot ID: {args.plotid}")
plot_id = bytes32(bytes.fromhex(args.plotid))
if args.memo is not None:
log.info(f"Debug memo: {args.memo}")
plot_memo = bytes.fromhex(args.memo)
# Log the plot memo for dev debugging
plot_memo_str: str = plot_memo.hex()
log.info(f"Memo: {plot_memo_str}")
dt_string = datetime.now().strftime("%Y-%m-%d-%H-%M")
if use_datetime:
filename: str = f"plot-k{args.size}-{dt_string}-{plot_id}.plot"
else:
filename = f"plot-k{args.size}-{plot_id}.plot"
full_path: Path = args.final_dir / filename
resolved_final_dir: str = str(Path(args.final_dir).resolve())
plot_directories_list: str = config["harvester"]["plot_directories"]
if args.exclude_final_dir:
log.info(f"NOT adding directory {resolved_final_dir} to harvester for farming")
if resolved_final_dir in plot_directories_list:
log.warning(f"Directory {resolved_final_dir} already exists for harvester, please remove it manually")
else:
if resolved_final_dir not in plot_directories_list:
# Adds the directory to the plot directories if it is not present
log.info(f"Adding directory {resolved_final_dir} to harvester for farming")
config = add_plot_directory(root_path, resolved_final_dir)
if not full_path.exists():
log.info(f"Starting plot {i + 1}/{num}")
# Creates the plot. This will take a long time for larger plots.
plotter: DiskPlotter = DiskPlotter()
plotter.create_plot_disk(
str(args.tmp_dir),
str(args.tmp2_dir),
str(args.final_dir),
filename,
args.size,
plot_memo,
plot_id,
args.buffer,
args.buckets,
args.stripe_size,
args.num_threads,
args.nobitfield,
)
created_plots[plot_id] = full_path
else:
log.info(f"Plot {filename} already exists")
existing_plots[plot_id] = full_path
log.info("Summary:")
if tmp_dir_created:
try:
args.tmp_dir.rmdir()
except Exception:
log.info(f"warning: did not remove primary temporary folder {args.tmp_dir}, it may not be empty.")
if tmp2_dir_created:
try:
args.tmp2_dir.rmdir()
except Exception:
log.info(f"warning: did not remove secondary temporary folder {args.tmp2_dir}, it may not be empty.")
log.info(f"Created a total of {len(created_plots)} new plots")
for created_path in created_plots.values():
log.info(created_path.name)
return created_plots, existing_plots
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/plotting/create_plots.py
| 0.816406 | 0.205217 |
create_plots.py
|
pypi
|
from typing import List, Optional
from salvia.consensus.block_record import BlockRecord
from salvia.full_node.full_node_api import FullNodeAPI
from salvia.protocols.full_node_protocol import RespondBlock
from salvia.simulator.simulator_protocol import FarmNewBlockProtocol, ReorgProtocol
from salvia.types.full_block import FullBlock
from salvia.util.api_decorators import api_request
from salvia.util.ints import uint8
class FullNodeSimulator(FullNodeAPI):
def __init__(self, full_node, block_tools) -> None:
super().__init__(full_node)
self.bt = block_tools
self.full_node = full_node
self.config = full_node.config
self.time_per_block = None
if "simulation" in self.config and self.config["simulation"] is True:
self.use_current_time = True
else:
self.use_current_time = False
async def get_all_full_blocks(self) -> List[FullBlock]:
peak: Optional[BlockRecord] = self.full_node.blockchain.get_peak()
if peak is None:
return []
blocks = []
peak_block = await self.full_node.blockchain.get_full_block(peak.header_hash)
if peak_block is None:
return []
blocks.append(peak_block)
current = peak_block
while True:
prev = await self.full_node.blockchain.get_full_block(current.prev_header_hash)
if prev is not None:
current = prev
blocks.append(prev)
else:
break
blocks.reverse()
return blocks
@api_request
async def farm_new_transaction_block(self, request: FarmNewBlockProtocol):
async with self.full_node._blockchain_lock_high_priority:
self.log.info("Farming new block!")
current_blocks = await self.get_all_full_blocks()
if len(current_blocks) == 0:
genesis = self.bt.get_consecutive_blocks(uint8(1))[0]
await self.full_node.blockchain.receive_block(genesis)
peak = self.full_node.blockchain.get_peak()
assert peak is not None
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = self.full_node.blockchain.block_record(curr.prev_hash)
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(curr.header_hash)
if mempool_bundle is None:
spend_bundle = None
else:
spend_bundle = mempool_bundle[0]
current_blocks = await self.get_all_full_blocks()
target = request.puzzle_hash
more = self.bt.get_consecutive_blocks(
1,
time_per_block=self.time_per_block,
transaction_data=spend_bundle,
farmer_reward_puzzle_hash=target,
pool_reward_puzzle_hash=target,
block_list_input=current_blocks,
guarantee_transaction_block=True,
current_time=self.use_current_time,
previous_generator=self.full_node.full_node_store.previous_generator,
)
rr = RespondBlock(more[-1])
await self.full_node.respond_block(rr)
@api_request
async def farm_new_block(self, request: FarmNewBlockProtocol):
async with self.full_node._blockchain_lock_high_priority:
self.log.info("Farming new block!")
current_blocks = await self.get_all_full_blocks()
if len(current_blocks) == 0:
genesis = self.bt.get_consecutive_blocks(uint8(1))[0]
await self.full_node.blockchain.receive_block(genesis)
peak = self.full_node.blockchain.get_peak()
assert peak is not None
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = self.full_node.blockchain.block_record(curr.prev_hash)
mempool_bundle = await self.full_node.mempool_manager.create_bundle_from_mempool(curr.header_hash)
if mempool_bundle is None:
spend_bundle = None
else:
spend_bundle = mempool_bundle[0]
current_blocks = await self.get_all_full_blocks()
target = request.puzzle_hash
more = self.bt.get_consecutive_blocks(
1,
transaction_data=spend_bundle,
farmer_reward_puzzle_hash=target,
pool_reward_puzzle_hash=target,
block_list_input=current_blocks,
current_time=self.use_current_time,
)
rr: RespondBlock = RespondBlock(more[-1])
await self.full_node.respond_block(rr)
@api_request
async def reorg_from_index_to_new_index(self, request: ReorgProtocol):
new_index = request.new_index
old_index = request.old_index
coinbase_ph = request.puzzle_hash
current_blocks = await self.get_all_full_blocks()
block_count = new_index - old_index
more_blocks = self.bt.get_consecutive_blocks(
block_count,
farmer_reward_puzzle_hash=coinbase_ph,
pool_reward_puzzle_hash=coinbase_ph,
block_list_input=current_blocks[: old_index + 1],
force_overflow=True,
guarantee_transaction_block=True,
seed=32 * b"1",
)
for block in more_blocks:
await self.full_node.respond_block(RespondBlock(block))
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/simulator/full_node_simulator.py
| 0.778397 | 0.247067 |
full_node_simulator.py
|
pypi
|
def sexp(*argv):
return f'({f" ".join([str(arg) for arg in argv])})'
def cons(a, b):
return sexp("c", a, b)
def first(obj):
return sexp("f", obj)
def rest(obj):
return sexp("r", obj)
def nth(obj, *path):
if not path:
return obj
if path[0] < 0:
raise ValueError
if path[0] == 0:
return nth(first(obj), *path[1:])
else:
return nth(rest(obj), *(path[0] - 1,) + path[1:])
def args(*path, p=1):
if len(path) == 0:
return str(p)
if path[0] < 0:
raise ValueError
return args(*path[1:], p=(2 * p << path[0]) | (2 ** path[0] - 1))
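# Path encoding examples (CLVM environment paths), derived from the recursion above:
#   args()     -> "1"  (the entire solution)
#   args(0)    -> "2"  (the first argument)
#   args(1)    -> "5"  (the second argument)
#   args(0, 0) -> "4"  (the first element of the first argument)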
def eval(code, env=args()):
return sexp("a", code, env)
def apply(name, argv):
return sexp(*[name] + list(argv))
def quote(obj):
return sexp("q .", obj)
nil = sexp()
def make_if(predicate, true_expression, false_expression):
return eval(apply("i", [predicate, quote(true_expression), quote(false_expression)]))
def make_list(*argv, terminator=nil):
if len(argv) == 0:
return terminator
else:
return cons(argv[0], make_list(*argv[1:], terminator=terminator))
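# e.g. make_list("1", "2") expands to the string "(c 1 (c 2 ()))"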
def fail(*argv):
return apply("x", argv)
def sha256(*argv):
return apply("sha256", argv)
SHA256TREE_PROG = """
(a (q . (a 2 (c 2 (c 3 0))))
(c (q . (a (i (l 5)
(q . (sha256 (q . 2)
(a 2 (c 2 (c 9 0)))
(a 2 (c 2 (c 13 0)))))
(q . (sha256 (q . 1) 5))) 1)) %s))
"""
def sha256tree(*argv):
return SHA256TREE_PROG % argv[0]
def equal(*argv):
return apply("=", argv)
def multiply(*argv):
return apply("*", argv)
def add(*argv):
return apply("+", argv)
def subtract(*argv):
return apply("-", argv)
def is_zero(obj):
return equal(obj, quote("0"))
def iff(*argv):
return apply("i", argv)
def hexstr(str):
return quote(f"0x{str}")
def greater(*argv):
return apply(">", argv)
def string(str):
return f'"{str}"'
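# Illustrative sketch (not part of the original module): these helpers compose plain
# strings of CLVM source. For example, a branch that raises when its first argument
# is zero and otherwise returns it can be written as
#   make_if(is_zero(args(0)), fail(), args(0))
# which expands to "(a (i (= 2 (q . 0)) (q . (x)) (q . 2)) 1)".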
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/salvialisp.py
| 0.424531 | 0.339965 |
salvialisp.py
|
pypi
|
import asyncio
import logging
from typing import Dict, List, Optional, Tuple
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.header_block import HeaderBlock
from salvia.util.ints import uint32
log = logging.getLogger(__name__)
class WalletSyncStore:
# Whether or not we are syncing
sync_mode: bool
# Whether we are waiting for peaks (at the start of sync) or already syncing
waiting_for_peaks: bool
# Potential new peaks that we have received from others.
potential_peaks: Dict[bytes32, HeaderBlock]
# Blocks received from other peers during sync
potential_blocks: Dict[uint32, HeaderBlock]
# Event to signal when blocks are received at each height
potential_blocks_received: Dict[uint32, asyncio.Event]
# Blocks that we have finalized during sync, queue them up for adding after sync is done
potential_future_blocks: List[HeaderBlock]
# A map from height to header hash of blocks added to the chain
header_hashes_added: Dict[uint32, bytes32]
# map from potential peak to fork point
peak_fork_point: Dict[bytes32, uint32]
@classmethod
async def create(cls) -> "WalletSyncStore":
self = cls()
self.sync_mode = False
self.waiting_for_peaks = True
self.potential_peaks = {}
self.potential_blocks = {}
self.potential_blocks_received = {}
self.potential_future_blocks = []
self.header_hashes_added = {}
self.peak_fork_point = {}
return self
def set_sync_mode(self, sync_mode: bool) -> None:
self.sync_mode = sync_mode
def get_sync_mode(self) -> bool:
return self.sync_mode
async def clear_sync_info(self):
self.potential_peaks.clear()
self.potential_blocks.clear()
self.potential_blocks_received.clear()
self.potential_future_blocks.clear()
self.header_hashes_added.clear()
self.waiting_for_peaks = True
self.peak_fork_point = {}
def get_potential_peaks_tuples(self) -> List[Tuple[bytes32, HeaderBlock]]:
return list(self.potential_peaks.items())
def add_potential_peak(self, block: HeaderBlock) -> None:
self.potential_peaks[block.header_hash] = block
def add_potential_fork_point(self, peak_hash: bytes32, fork_point: uint32):
self.peak_fork_point[peak_hash] = fork_point
def get_potential_fork_point(self, peak_hash: bytes32) -> Optional[uint32]:
if peak_hash in self.peak_fork_point:
return self.peak_fork_point[peak_hash]
else:
return None
def get_potential_peak(self, header_hash: bytes32) -> Optional[HeaderBlock]:
return self.potential_peaks.get(header_hash, None)
def add_potential_future_block(self, block: HeaderBlock):
self.potential_future_blocks.append(block)
def get_potential_future_blocks(self):
return self.potential_future_blocks
def add_header_hashes_added(self, height: uint32, header_hash: bytes32):
self.header_hashes_added[height] = header_hash
def get_header_hashes_added(self, height: uint32) -> Optional[bytes32]:
return self.header_hashes_added.get(height, None)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_sync_store.py
| 0.867148 | 0.2866 |
wallet_sync_store.py
|
pypi
|
from typing import List, Optional
import aiosqlite
from salvia.util.db_wrapper import DBWrapper
from salvia.util.ints import uint32
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet_info import WalletInfo
class WalletUserStore:
"""
WalletUserStore keeps track of all user-created wallets and the necessary smart-contract data
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS users_wallets("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_type int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on users_wallets(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS type on users_wallets(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS data on users_wallets(data)")
await self.db_connection.commit()
await self.init_wallet()
return self
async def init_wallet(self):
all_wallets = await self.get_all_wallet_info_entries()
if len(all_wallets) == 0:
await self.create_wallet("Salvia Wallet", WalletType.STANDARD_WALLET, "")
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM users_wallets")
await cursor.close()
await self.db_connection.commit()
async def create_wallet(
self, name: str, wallet_type: int, data: str, id: Optional[int] = None, in_transaction=False
) -> Optional[WalletInfo]:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
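# id may be None here, in which case SQLite's AUTOINCREMENT assigns the next wallet id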
cursor = await self.db_connection.execute(
"INSERT INTO users_wallets VALUES(?, ?, ?, ?)",
(id, name, wallet_type, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
return await self.get_last_wallet()
async def delete_wallet(self, id: int, in_transaction: bool):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(f"DELETE FROM users_wallets where id={id}")
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def update_wallet(self, wallet_info: WalletInfo, in_transaction):
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT or REPLACE INTO users_wallets VALUES(?, ?, ?, ?)",
(
wallet_info.id,
wallet_info.name,
wallet_info.type,
wallet_info.data,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_last_wallet(self) -> Optional[WalletInfo]:
cursor = await self.db_connection.execute("SELECT MAX(id) FROM users_wallets;")
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return await self.get_wallet_by_id(row[0])
async def get_all_wallet_info_entries(self) -> List[WalletInfo]:
"""
Return a list containing all wallets
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets")
rows = await cursor.fetchall()
await cursor.close()
result = []
for row in rows:
result.append(WalletInfo(row[0], row[1], row[2], row[3]))
return result
async def get_wallet_by_id(self, id: int) -> Optional[WalletInfo]:
"""
Return a wallet by id
"""
cursor = await self.db_connection.execute("SELECT * from users_wallets WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletInfo(row[0], row[1], row[2], row[3])
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_user_store.py
| 0.740644 | 0.258648 |
wallet_user_store.py
|
pypi
|
import time
from typing import Dict, List, Optional, Tuple
import aiosqlite
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.util.db_wrapper import DBWrapper
from salvia.util.errors import Err
from salvia.util.ints import uint8, uint32
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.transaction_type import TransactionType
class WalletTransactionStore:
"""
WalletTransactionStore stores transaction history for the wallet.
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
tx_record_cache: Dict[bytes32, TransactionRecord]
tx_submitted: Dict[bytes32, Tuple[int, int]] # tx_id: [time submitted: count]
unconfirmed_for_wallet: Dict[int, Dict[bytes32, TransactionRecord]]
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = self.db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS transaction_record("
" transaction_record blob,"
" bundle_id text PRIMARY KEY," # NOTE: bundle_id is being stored as bytes, not hex
" confirmed_at_height bigint,"
" created_at_time bigint,"
" to_puzzle_hash text,"
" amount blob,"
" fee_amount blob,"
" confirmed int,"
" sent int,"
" wallet_id bigint,"
" trade_id text,"
" type int)"
)
)
# Useful for reorg lookups
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_confirmed_index on transaction_record(confirmed_at_height)"
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_created_index on transaction_record(created_at_time)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_confirmed on transaction_record(confirmed)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_sent on transaction_record(sent)")
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_created_time on transaction_record(created_at_time)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS tx_type on transaction_record(type)")
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS tx_to_puzzle_hash on transaction_record(to_puzzle_hash)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on transaction_record(wallet_id)")
await self.db_connection.commit()
self.tx_record_cache = {}
self.tx_submitted = {}
self.unconfirmed_for_wallet = {}
await self.rebuild_tx_cache()
return self
async def rebuild_tx_cache(self):
# init cache here
all_records = await self.get_all_transactions()
self.tx_record_cache = {}
self.unconfirmed_for_wallet = {}
for record in all_records:
self.tx_record_cache[record.name] = record
if record.wallet_id not in self.unconfirmed_for_wallet:
self.unconfirmed_for_wallet[record.wallet_id] = {}
if not record.confirmed:
self.unconfirmed_for_wallet[record.wallet_id][record.name] = record
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM transaction_record")
await cursor.close()
await self.db_connection.commit()
async def add_transaction_record(self, record: TransactionRecord, in_transaction: bool) -> None:
"""
Store TransactionRecord in DB and Cache.
"""
self.tx_record_cache[record.name] = record
if record.wallet_id not in self.unconfirmed_for_wallet:
self.unconfirmed_for_wallet[record.wallet_id] = {}
unconfirmed_dict = self.unconfirmed_for_wallet[record.wallet_id]
if record.confirmed and record.name in unconfirmed_dict:
unconfirmed_dict.pop(record.name)
if not record.confirmed:
unconfirmed_dict[record.name] = record
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO transaction_record VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
(
bytes(record),
record.name,
record.confirmed_at_height,
record.created_at_time,
record.to_puzzle_hash.hex(),
bytes(record.amount),
bytes(record.fee_amount),
int(record.confirmed),
record.sent,
record.wallet_id,
record.trade_id,
record.type,
),
)
await cursor.close()
if not in_transaction:
await self.db_connection.commit()
except BaseException:
if not in_transaction:
await self.rebuild_tx_cache()
raise
finally:
if not in_transaction:
self.db_wrapper.lock.release()
async def set_confirmed(self, tx_id: bytes32, height: uint32):
"""
Updates transaction to be confirmed.
"""
current: Optional[TransactionRecord] = await self.get_transaction_record(tx_id)
if current is None:
return None
if current.confirmed_at_height == height:
return
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=height,
created_at_time=current.created_at_time,
to_puzzle_hash=current.to_puzzle_hash,
amount=current.amount,
fee_amount=current.fee_amount,
confirmed=True,
sent=current.sent,
spend_bundle=current.spend_bundle,
additions=current.additions,
removals=current.removals,
wallet_id=current.wallet_id,
sent_to=current.sent_to,
trade_id=None,
type=current.type,
name=current.name,
)
await self.add_transaction_record(tx, True)
async def increment_sent(
self,
tx_id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
err: Optional[Err],
) -> bool:
"""
Updates transaction sent count (Full Node has received spend_bundle and sent ack).
"""
current: Optional[TransactionRecord] = await self.get_transaction_record(tx_id)
if current is None:
return False
sent_to = current.sent_to.copy()
current_peers = set()
err_str = err.name if err is not None else None
append_data = (name, uint8(send_status.value), err_str)
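# The sent counter only increases when this peer has not acknowledged the transaction before;
# the new (peer, status, error) entry is appended to sent_to either way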
for peer_id, status, error in sent_to:
current_peers.add(peer_id)
if name in current_peers:
sent_count = uint32(current.sent)
else:
sent_count = uint32(current.sent + 1)
sent_to.append(append_data)
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=current.confirmed_at_height,
created_at_time=current.created_at_time,
to_puzzle_hash=current.to_puzzle_hash,
amount=current.amount,
fee_amount=current.fee_amount,
confirmed=current.confirmed,
sent=sent_count,
spend_bundle=current.spend_bundle,
additions=current.additions,
removals=current.removals,
wallet_id=current.wallet_id,
sent_to=sent_to,
trade_id=None,
type=current.type,
name=current.name,
)
await self.add_transaction_record(tx, False)
return True
async def tx_reorged(self, record: TransactionRecord):
"""
Updates transaction sent count to 0 and resets confirmation data
"""
tx: TransactionRecord = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=record.created_at_time,
to_puzzle_hash=record.to_puzzle_hash,
amount=record.amount,
fee_amount=record.fee_amount,
confirmed=False,
sent=uint32(0),
spend_bundle=record.spend_bundle,
additions=record.additions,
removals=record.removals,
wallet_id=record.wallet_id,
sent_to=[],
trade_id=None,
type=record.type,
name=record.name,
)
await self.add_transaction_record(tx, True)
async def get_transaction_record(self, tx_id: bytes32) -> Optional[TransactionRecord]:
"""
Checks DB and cache for TransactionRecord with id: id and returns it.
"""
if tx_id in self.tx_record_cache:
return self.tx_record_cache[tx_id]
# NOTE: bundle_id is being stored as bytes, not hex
cursor = await self.db_connection.execute("SELECT * from transaction_record WHERE bundle_id=?", (tx_id,))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
record = TransactionRecord.from_bytes(row[0])
return record
return None
async def get_not_sent(self) -> List[TransactionRecord]:
"""
Returns the list of transactions that have not yet been received by the full node.
"""
current_time = int(time.time())
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed=?",
(0,),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
if record.name in self.tx_submitted:
time_submitted, count = self.tx_submitted[record.name]
if time_submitted < current_time - (60 * 10):
records.append(record)
self.tx_submitted[record.name] = current_time, 1
else:
if count < 5:
records.append(record)
self.tx_submitted[record.name] = time_submitted, (count + 1)
else:
records.append(record)
self.tx_submitted[record.name] = current_time, 1
return records
async def get_farming_rewards(self) -> List[TransactionRecord]:
"""
Returns the list of all farming rewards.
"""
fee_int = TransactionType.FEE_REWARD.value
pool_int = TransactionType.COINBASE_REWARD.value
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed=? and (type=? or type=?)", (1, fee_int, pool_int)
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_unconfirmed(self) -> List[TransactionRecord]:
"""
Returns the list of all transactions that have not yet been confirmed.
"""
cursor = await self.db_connection.execute("SELECT * from transaction_record WHERE confirmed=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_unconfirmed_for_wallet(self, wallet_id: int) -> List[TransactionRecord]:
"""
Returns the list of transactions for the given wallet that have not yet been confirmed.
"""
if wallet_id in self.unconfirmed_for_wallet:
return list(self.unconfirmed_for_wallet[wallet_id].values())
else:
return []
async def get_transactions_between(self, wallet_id: int, start, end) -> List[TransactionRecord]:
"""Return a list of transaction between start and end index. List is in reverse chronological order.
start = 0 is most recent transaction
"""
limit = end - start
cursor = await self.db_connection.execute(
f"SELECT * from transaction_record where wallet_id=? and confirmed_at_height not in"
f" (select confirmed_at_height from transaction_record order by confirmed_at_height"
f" ASC LIMIT {start})"
f" order by confirmed_at_height DESC LIMIT {limit}",
(wallet_id,),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
records.reverse()
return records
async def get_transaction_count_for_wallet(self, wallet_id) -> int:
cursor = await self.db_connection.execute(
"SELECT COUNT(*) FROM transaction_record where wallet_id=?", (wallet_id,)
)
count_result = await cursor.fetchone()
if count_result is not None:
count = count_result[0]
else:
count = 0
await cursor.close()
return count
async def get_all_transactions_for_wallet(self, wallet_id: int, type: Optional[int] = None) -> List[TransactionRecord]:
"""
Returns all stored transactions for the given wallet, optionally filtered by type.
"""
if type is None:
cursor = await self.db_connection.execute(
"SELECT * from transaction_record where wallet_id=?", (wallet_id,)
)
else:
cursor = await self.db_connection.execute(
"SELECT * from transaction_record where wallet_id=? and type=?",
(
wallet_id,
type,
),
)
rows = await cursor.fetchall()
await cursor.close()
records = []
cache_set = set()
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
cache_set.add(record.name)
return records
async def get_all_transactions(self) -> List[TransactionRecord]:
"""
Returns all stored transactions.
"""
cursor = await self.db_connection.execute("SELECT * from transaction_record")
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def get_transaction_above(self, height: int) -> List[TransactionRecord]:
# Can be -1 (get all tx)
cursor = await self.db_connection.execute(
"SELECT * from transaction_record WHERE confirmed_at_height>?", (height,)
)
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TransactionRecord.from_bytes(row[0])
records.append(record)
return records
async def rollback_to_block(self, height: int):
# Delete from storage
to_delete = []
for tx in self.tx_record_cache.values():
if tx.confirmed_at_height > height:
to_delete.append(tx)
for tx in to_delete:
self.tx_record_cache.pop(tx.name)
c1 = await self.db_connection.execute("DELETE FROM transaction_record WHERE confirmed_at_height>?", (height,))
await c1.close()
async def delete_unconfirmed_transactions(self, wallet_id: int):
cursor = await self.db_connection.execute(
"DELETE FROM transaction_record WHERE confirmed=0 AND wallet_id=?", (wallet_id,)
)
await cursor.close()
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_transaction_store.py
| 0.75037 | 0.170543 |
wallet_transaction_store.py
|
pypi
|
import logging
from typing import List, Tuple, Dict, Optional
import aiosqlite
from salvia.types.coin_spend import CoinSpend
from salvia.util.db_wrapper import DBWrapper
from salvia.util.ints import uint32
log = logging.getLogger(__name__)
class WalletPoolStore:
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
_state_transitions_cache: Dict[int, List[Tuple[uint32, CoinSpend]]]
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS pool_state_transitions(transition_index integer, wallet_id integer, "
"height bigint, coin_spend blob, PRIMARY KEY(transition_index, wallet_id))"
)
await self.db_connection.commit()
await self.rebuild_cache()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def add_spend(
self,
wallet_id: int,
spend: CoinSpend,
height: uint32,
) -> None:
"""
Appends (or replaces) an entry in the DB. The height must not go down, and the new spend must extend the
previous spend (its coin's parent must be the coin of the last stored spend). Note that this is not committed
to the DB until db_wrapper.commit() is called. However it is written to the cache, so it can be fetched with
get_spends_for_wallet.
"""
if wallet_id not in self._state_transitions_cache:
self._state_transitions_cache[wallet_id] = []
all_state_transitions: List[Tuple[uint32, CoinSpend]] = self.get_spends_for_wallet(wallet_id)
if (height, spend) in all_state_transitions:
return
if len(all_state_transitions) > 0:
if height < all_state_transitions[-1][0]:
raise ValueError("Height cannot go down")
if spend.coin.parent_coin_info != all_state_transitions[-1][1].coin.name():
raise ValueError("New spend does not extend")
all_state_transitions.append((height, spend))
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO pool_state_transitions VALUES (?, ?, ?, ?)",
(
len(all_state_transitions) - 1,
wallet_id,
height,
bytes(spend),
),
)
await cursor.close()
def get_spends_for_wallet(self, wallet_id: int) -> List[Tuple[uint32, CoinSpend]]:
"""
Retrieves all entries for a wallet ID from the cache; this works even if commit has not been called yet.
"""
return self._state_transitions_cache.get(wallet_id, [])
async def rebuild_cache(self) -> None:
"""
This resets the cache, and loads all entries from the DB. Any entries in the cache that were not committed
are removed. This can happen if a state transition in wallet_blockchain fails.
"""
cursor = await self.db_connection.execute("SELECT * FROM pool_state_transitions ORDER BY transition_index")
rows = await cursor.fetchall()
await cursor.close()
self._state_transitions_cache = {}
for row in rows:
_, wallet_id, height, coin_spend_bytes = row
coin_spend: CoinSpend = CoinSpend.from_bytes(coin_spend_bytes)
if wallet_id not in self._state_transitions_cache:
self._state_transitions_cache[wallet_id] = []
self._state_transitions_cache[wallet_id].append((height, coin_spend))
async def rollback(self, height: int, wallet_id_arg: int) -> None:
"""
Rollback removes all entries which have height > the height passed in. Note that this is not committed to the
DB until db_wrapper.commit() is called. However it is written to the cache, so it can be fetched with
get_spends_for_wallet.
"""
for wallet_id, items in self._state_transitions_cache.items():
remove_index_start: Optional[int] = None
for i, (item_block_height, _) in enumerate(items):
if item_block_height > height and wallet_id == wallet_id_arg:
remove_index_start = i
break
if remove_index_start is not None:
del items[remove_index_start:]
cursor = await self.db_connection.execute(
"DELETE FROM pool_state_transitions WHERE height>? AND wallet_id=?", (height, wallet_id_arg)
)
await cursor.close()
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_pool_store.py
| 0.727589 | 0.200186 |
wallet_pool_store.py
|
pypi
|
import asyncio
import dataclasses
import logging
import multiprocessing
from concurrent.futures.process import ProcessPoolExecutor
from enum import Enum
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from salvia.consensus.block_header_validation import validate_finished_header_block, validate_unfinished_header_block
from salvia.consensus.block_record import BlockRecord
from salvia.consensus.blockchain_interface import BlockchainInterface
from salvia.consensus.constants import ConsensusConstants
from salvia.consensus.difficulty_adjustment import get_next_sub_slot_iters_and_difficulty
from salvia.consensus.find_fork_point import find_fork_point_in_chain
from salvia.consensus.full_block_to_block_record import block_to_block_record
from salvia.consensus.multiprocess_validation import PreValidationResult, pre_validate_blocks_multiprocessing
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.coin_spend import CoinSpend
from salvia.types.header_block import HeaderBlock
from salvia.types.unfinished_header_block import UnfinishedHeaderBlock
from salvia.util.errors import Err, ValidationError
from salvia.util.ints import uint32, uint64
from salvia.util.streamable import recurse_jsonify
from salvia.wallet.block_record import HeaderBlockRecord
from salvia.wallet.wallet_block_store import WalletBlockStore
from salvia.wallet.wallet_coin_store import WalletCoinStore
from salvia.wallet.wallet_pool_store import WalletPoolStore
from salvia.wallet.wallet_transaction_store import WalletTransactionStore
log = logging.getLogger(__name__)
class ReceiveBlockResult(Enum):
"""
When Blockchain.receive_block(b) is called, one of these results is returned,
showing whether the block was added to the chain (extending the peak),
and if not, why it was not added.
"""
NEW_PEAK = 1 # Added to the peak of the blockchain
ADDED_AS_ORPHAN = 2 # Added as an orphan/stale block (not a new peak of the chain)
INVALID_BLOCK = 3 # Block was not added because it was invalid
ALREADY_HAVE_BLOCK = 4 # Block is already present in this blockchain
DISCONNECTED_BLOCK = 5 # Block's parent (previous pointer) is not in this blockchain
class WalletBlockchain(BlockchainInterface):
constants: ConsensusConstants
constants_json: Dict
# peak of the blockchain
_peak_height: Optional[uint32]
# All blocks on the peak path are guaranteed to be included; orphan blocks may also be present
__block_records: Dict[bytes32, BlockRecord]
# Defines the path from genesis to the peak, no orphan blocks
__height_to_hash: Dict[uint32, bytes32]
# all hashes of blocks in block_record by height, used for garbage collection
__heights_in_cache: Dict[uint32, Set[bytes32]]
# All sub-epoch summaries that have been included in the blockchain from the beginning until and including the peak
# (height_included, SubEpochSummary). Note: ONLY for the blocks in the path to the peak
__sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
# Stores
coin_store: WalletCoinStore
tx_store: WalletTransactionStore
pool_store: WalletPoolStore
block_store: WalletBlockStore
# Used to verify blocks in parallel
pool: ProcessPoolExecutor
new_transaction_block_callback: Any
reorg_rollback: Any
wallet_state_manager_lock: asyncio.Lock
# Whether blockchain is shut down or not
_shut_down: bool
# Lock to prevent simultaneous reads and writes
lock: asyncio.Lock
log: logging.Logger
@staticmethod
async def create(
block_store: WalletBlockStore,
coin_store: WalletCoinStore,
tx_store: WalletTransactionStore,
pool_store: WalletPoolStore,
consensus_constants: ConsensusConstants,
new_transaction_block_callback: Callable, # f(removals: List[Coin], additions: List[Coin], height: uint32)
reorg_rollback: Callable,
lock: asyncio.Lock,
):
"""
Initializes a blockchain with the BlockRecords from disk, assuming they have all been
validated. Uses the genesis block defined in the consensus constants config.
"""
self = WalletBlockchain()
self.lock = asyncio.Lock()
self.coin_store = coin_store
self.tx_store = tx_store
self.pool_store = pool_store
cpu_count = multiprocessing.cpu_count()
if cpu_count > 61:
cpu_count = 61 # Windows Server 2016 has an issue https://bugs.python.org/issue26903
num_workers = max(cpu_count - 2, 1)
self.pool = ProcessPoolExecutor(max_workers=num_workers)
log.info(f"Started {num_workers} processes for block validation")
self.constants = consensus_constants
self.constants_json = recurse_jsonify(dataclasses.asdict(self.constants))
self.block_store = block_store
self._shut_down = False
self.new_transaction_block_callback = new_transaction_block_callback
self.reorg_rollback = reorg_rollback
self.log = logging.getLogger(__name__)
self.wallet_state_manager_lock = lock
await self._load_chain_from_store()
return self
def shut_down(self):
self._shut_down = True
self.pool.shutdown(wait=True)
async def _load_chain_from_store(self) -> None:
"""
Initializes the state of the Blockchain class from the database.
"""
height_to_hash, sub_epoch_summaries = await self.block_store.get_peak_heights_dicts()
self.__height_to_hash = height_to_hash
self.__sub_epoch_summaries = sub_epoch_summaries
self.__block_records = {}
self.__heights_in_cache = {}
blocks, peak = await self.block_store.get_block_records_close_to_peak(self.constants.BLOCKS_CACHE_SIZE)
for block_record in blocks.values():
self.add_block_record(block_record)
if len(blocks) == 0:
assert peak is None
self._peak_height = None
return None
assert peak is not None
self._peak_height = self.block_record(peak).height
assert len(self.__height_to_hash) == self._peak_height + 1
def get_peak(self) -> Optional[BlockRecord]:
"""
Return the peak of the blockchain
"""
if self._peak_height is None:
return None
return self.height_to_block_record(self._peak_height)
async def receive_block(
self,
header_block_record: HeaderBlockRecord,
pre_validation_result: Optional[PreValidationResult] = None,
trusted: bool = False,
fork_point_with_peak: Optional[uint32] = None,
additional_coin_spends: Optional[List[CoinSpend]] = None,
) -> Tuple[ReceiveBlockResult, Optional[Err], Optional[uint32]]:
"""
Adds a new block into the blockchain, if it's valid and connected to the current
blockchain, regardless of whether it is the child of a head, or another block.
Returns a ReceiveBlockResult indicating whether the block was added, an optional error if the
block is invalid, and the fork height in the case of a new peak.
"""
if additional_coin_spends is None:
additional_coin_spends = []
block = header_block_record.header
genesis: bool = block.height == 0
if self.contains_block(block.header_hash):
return ReceiveBlockResult.ALREADY_HAVE_BLOCK, None, None
if not self.contains_block(block.prev_header_hash) and not genesis:
return (
ReceiveBlockResult.DISCONNECTED_BLOCK,
Err.INVALID_PREV_BLOCK_HASH,
None,
)
if block.height == 0:
prev_b: Optional[BlockRecord] = None
else:
prev_b = self.block_record(block.prev_header_hash)
sub_slot_iters, difficulty = get_next_sub_slot_iters_and_difficulty(
self.constants, len(block.finished_sub_slots) > 0, prev_b, self
)
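# Three validation paths: full header validation (untrusted, no pre-validation result),
# a lighter unfinished-header validation for trusted peers, or reuse of a pre-validation result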
if trusted is False and pre_validation_result is None:
required_iters, error = validate_finished_header_block(
self.constants, self, block, False, difficulty, sub_slot_iters
)
elif trusted:
unfinished_header_block = UnfinishedHeaderBlock(
block.finished_sub_slots,
block.reward_chain_block.get_unfinished(),
block.challenge_chain_sp_proof,
block.reward_chain_sp_proof,
block.foliage,
block.foliage_transaction_block,
block.transactions_filter,
)
required_iters, val_error = validate_unfinished_header_block(
self.constants, self, unfinished_header_block, False, difficulty, sub_slot_iters, False, True
)
error = val_error if val_error is not None else None
else:
assert pre_validation_result is not None
required_iters = pre_validation_result.required_iters
error = (
ValidationError(Err(pre_validation_result.error)) if pre_validation_result.error is not None else None
)
if error is not None:
return ReceiveBlockResult.INVALID_BLOCK, error.code, None
assert required_iters is not None
block_record = block_to_block_record(
self.constants,
self,
required_iters,
None,
block,
)
heights_changed: Set[Tuple[uint32, Optional[bytes32]]] = set()
# Always add the block to the database
async with self.wallet_state_manager_lock:
async with self.block_store.db_wrapper.lock:
try:
await self.block_store.db_wrapper.begin_transaction()
await self.block_store.add_block_record(header_block_record, block_record, additional_coin_spends)
self.add_block_record(block_record)
self.clean_block_record(block_record.height - self.constants.BLOCKS_CACHE_SIZE)
fork_height, records_to_add = await self._reconsider_peak(
block_record, genesis, fork_point_with_peak, additional_coin_spends, heights_changed
)
for record in records_to_add:
if record.sub_epoch_summary_included is not None:
self.__sub_epoch_summaries[record.height] = record.sub_epoch_summary_included
await self.block_store.db_wrapper.commit_transaction()
except BaseException as e:
self.log.error(f"Error during db transaction: {e}")
if self.block_store.db_wrapper.db._connection is not None:
await self.block_store.db_wrapper.rollback_transaction()
self.remove_block_record(block_record.header_hash)
self.block_store.rollback_cache_block(block_record.header_hash)
await self.coin_store.rebuild_wallet_cache()
await self.tx_store.rebuild_tx_cache()
await self.pool_store.rebuild_cache()
for height, replaced in heights_changed:
# If it was replaced change back to the previous value otherwise pop the change
if replaced is not None:
self.__height_to_hash[height] = replaced
else:
self.__height_to_hash.pop(height)
raise
if fork_height is not None:
self.log.info(f"💰 Updated wallet peak to height {block_record.height}, weight {block_record.weight}, ")
return ReceiveBlockResult.NEW_PEAK, None, fork_height
else:
return ReceiveBlockResult.ADDED_AS_ORPHAN, None, None
async def _reconsider_peak(
self,
block_record: BlockRecord,
genesis: bool,
fork_point_with_peak: Optional[uint32],
additional_coin_spends_from_wallet: Optional[List[CoinSpend]],
heights_changed: Set[Tuple[uint32, Optional[bytes32]]],
) -> Tuple[Optional[uint32], List[BlockRecord]]:
"""
When a new block is added, this is called, to check if the new block is the new peak of the chain.
This also handles reorgs by reverting blocks which are not in the heaviest chain.
It returns the height of the fork between the previous chain and the new chain, or returns
None if there was no update to the heaviest chain.
"""
peak = self.get_peak()
if genesis:
if peak is None:
block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(
block_record.header_hash
)
assert block is not None
replaced = None
if uint32(0) in self.__height_to_hash:
replaced = self.__height_to_hash[uint32(0)]
self.__height_to_hash[uint32(0)] = block.header_hash
heights_changed.add((uint32(0), replaced))
assert len(block.additions) == 0 and len(block.removals) == 0
await self.new_transaction_block_callback(block.removals, block.additions, block_record, [])
self._peak_height = uint32(0)
return uint32(0), [block_record]
return None, []
assert peak is not None
if block_record.weight > peak.weight:
# Find the fork. if the block is just being appended, it will return the peak
# If no blocks in common, returns -1, and reverts all blocks
if fork_point_with_peak is not None:
fork_h: int = fork_point_with_peak
else:
fork_h = find_fork_point_in_chain(self, block_record, peak)
# Rollback to fork
self.log.debug(f"fork_h: {fork_h}, SB: {block_record.height}, peak: {peak.height}")
if block_record.prev_hash != peak.header_hash:
await self.reorg_rollback(fork_h)
# Rollback sub_epoch_summaries
heights_to_delete = []
for ses_included_height in self.__sub_epoch_summaries.keys():
if ses_included_height > fork_h:
heights_to_delete.append(ses_included_height)
for height in heights_to_delete:
del self.__sub_epoch_summaries[height]
# Collect all blocks from fork point to new peak
blocks_to_add: List[Tuple[HeaderBlockRecord, BlockRecord, List[CoinSpend]]] = []
curr = block_record.header_hash
while fork_h < 0 or curr != self.height_to_hash(uint32(fork_h)):
fetched_header_block: Optional[HeaderBlockRecord] = await self.block_store.get_header_block_record(curr)
fetched_block_record: Optional[BlockRecord] = await self.block_store.get_block_record(curr)
if curr == block_record.header_hash:
additional_coin_spends = additional_coin_spends_from_wallet
else:
additional_coin_spends = await self.block_store.get_additional_coin_spends(curr)
if additional_coin_spends is None:
additional_coin_spends = []
assert fetched_header_block is not None
assert fetched_block_record is not None
blocks_to_add.append((fetched_header_block, fetched_block_record, additional_coin_spends))
if fetched_header_block.height == 0:
# Doing a full reorg, starting at height 0
break
curr = fetched_block_record.prev_hash
records_to_add: List[BlockRecord] = []
for fetched_header_block, fetched_block_record, additional_coin_spends in reversed(blocks_to_add):
replaced = None
if fetched_block_record.height in self.__height_to_hash:
replaced = self.__height_to_hash[fetched_block_record.height]
self.__height_to_hash[fetched_block_record.height] = fetched_block_record.header_hash
heights_changed.add((fetched_block_record.height, replaced))
records_to_add.append(fetched_block_record)
if fetched_block_record.is_transaction_block:
await self.new_transaction_block_callback(
fetched_header_block.removals,
fetched_header_block.additions,
fetched_block_record,
additional_coin_spends,
)
# Changes the peak to be the new peak
await self.block_store.set_peak(block_record.header_hash)
self._peak_height = block_record.height
if fork_h < 0:
return None, records_to_add
return uint32(fork_h), records_to_add
# This is not a heavier block than the heaviest we have seen, so we don't change the coin set
return None, []
def get_next_difficulty(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.DIFFICULTY_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[1]
def get_next_slot_iters(self, header_hash: bytes32, new_slot: bool) -> uint64:
assert self.contains_block(header_hash)
curr = self.block_record(header_hash)
if curr.height <= 2:
return self.constants.SUB_SLOT_ITERS_STARTING
return get_next_sub_slot_iters_and_difficulty(self.constants, new_slot, curr, self)[0]
async def pre_validate_blocks_multiprocessing(
self, blocks: List[HeaderBlock], batch_size: int = 4
) -> Optional[List[PreValidationResult]]:
return await pre_validate_blocks_multiprocessing(
self.constants, self.constants_json, self, blocks, self.pool, True, {}, None, batch_size
)
def contains_block(self, header_hash: bytes32) -> bool:
"""
True if we have already added this block to the chain. This may return false for orphan blocks
that we have added but no longer keep in memory.
"""
return header_hash in self.__block_records
def block_record(self, header_hash: bytes32) -> BlockRecord:
return self.__block_records[header_hash]
def height_to_block_record(self, height: uint32, check_db=False) -> BlockRecord:
header_hash = self.height_to_hash(height)
return self.block_record(header_hash)
def get_ses_heights(self) -> List[uint32]:
return sorted(self.__sub_epoch_summaries.keys())
def get_ses(self, height: uint32) -> SubEpochSummary:
return self.__sub_epoch_summaries[height]
def height_to_hash(self, height: uint32) -> Optional[bytes32]:
return self.__height_to_hash[height]
def contains_height(self, height: uint32) -> bool:
return height in self.__height_to_hash
def get_peak_height(self) -> Optional[uint32]:
return self._peak_height
async def warmup(self, fork_point: uint32):
"""
Loads blocks into the cache. The blocks loaded include all blocks from
fork_point - BLOCKS_CACHE_SIZE up to and including the current peak.
Args:
fork_point: the fork height; blocks from fork_point - BLOCKS_CACHE_SIZE onward are loaded into the cache
"""
if self._peak_height is None:
return None
blocks = await self.block_store.get_block_records_in_range(
fork_point - self.constants.BLOCKS_CACHE_SIZE, self._peak_height
)
for block_record in blocks.values():
self.add_block_record(block_record)
def clean_block_record(self, height: int):
"""
Clears all block records in the cache which have height at or below the given height.
Args:
height: the highest height to evict from the cache
"""
if height < 0:
return None
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
while blocks_to_remove is not None and height >= 0:
for header_hash in blocks_to_remove:
del self.__block_records[header_hash]
del self.__heights_in_cache[uint32(height)] # remove height from heights in cache
if height == 0:
break
height -= 1
blocks_to_remove = self.__heights_in_cache.get(uint32(height), None)
def clean_block_records(self):
"""
Cleans the cache so that we only maintain relevant blocks.
This removes block records that have height at or below peak - BLOCKS_CACHE_SIZE.
The blocks that remain in the cache are needed for calculating future difficulty adjustments.
"""
if len(self.__block_records) < self.constants.BLOCKS_CACHE_SIZE:
return None
peak = self.get_peak()
assert peak is not None
if peak.height - self.constants.BLOCKS_CACHE_SIZE < 0:
return None
self.clean_block_record(peak.height - self.constants.BLOCKS_CACHE_SIZE)
async def get_block_records_in_range(self, start: int, stop: int) -> Dict[bytes32, BlockRecord]:
return await self.block_store.get_block_records_in_range(start, stop)
async def get_header_blocks_in_range(
self, start: int, stop: int, tx_filter: bool = True
) -> Dict[bytes32, HeaderBlock]:
return await self.block_store.get_header_blocks_in_range(start, stop)
async def get_block_record_from_db(self, header_hash: bytes32) -> Optional[BlockRecord]:
if header_hash in self.__block_records:
return self.__block_records[header_hash]
return await self.block_store.get_block_record(header_hash)
def remove_block_record(self, header_hash: bytes32):
sbr = self.block_record(header_hash)
del self.__block_records[header_hash]
self.__heights_in_cache[sbr.height].remove(header_hash)
def add_block_record(self, block_record: BlockRecord):
self.__block_records[block_record.header_hash] = block_record
if block_record.height not in self.__heights_in_cache.keys():
self.__heights_in_cache[block_record.height] = set()
self.__heights_in_cache[block_record.height].add(block_record.header_hash)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_blockchain.py
| 0.79534 | 0.281875 |
wallet_blockchain.py
|
pypi
|
from dataclasses import dataclass
from typing import Dict, List, Optional, Tuple
import aiosqlite
from salvia.consensus.block_record import BlockRecord
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.sub_epoch_summary import SubEpochSummary
from salvia.types.coin_spend import CoinSpend
from salvia.types.header_block import HeaderBlock
from salvia.util.db_wrapper import DBWrapper
from salvia.util.ints import uint32, uint64
from salvia.util.lru_cache import LRUCache
from salvia.util.streamable import Streamable, streamable
from salvia.wallet.block_record import HeaderBlockRecord
@dataclass(frozen=True)
@streamable
class AdditionalCoinSpends(Streamable):
coin_spends_list: List[CoinSpend]
class WalletBlockStore:
"""
This object handles HeaderBlocks and Blocks stored in DB used by wallet.
"""
db: aiosqlite.Connection
db_wrapper: DBWrapper
block_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
await self.db.execute(
"CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, height int,"
" timestamp int, block blob)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint, weight bigint, total_iters text,"
"block blob, sub_epoch_summary blob, is_peak tinyint)"
)
await self.db.execute(
"CREATE TABLE IF NOT EXISTS additional_coin_spends(header_hash text PRIMARY KEY, spends_list_blob blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
await self.db.execute("CREATE INDEX IF NOT EXISTS hh on block_records(header_hash)")
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak)")
await self.db.commit()
self.block_cache = LRUCache(1000)
return self
async def _clear_database(self):
cursor_2 = await self.db.execute("DELETE FROM header_blocks")
await cursor_2.close()
await self.db.commit()
async def add_block_record(
self,
header_block_record: HeaderBlockRecord,
block_record: BlockRecord,
additional_coin_spends: List[CoinSpend],
):
"""
Adds a block record to the database. This block record is assumed to be connected
to the chain, but it may or may not be in the LCA path.
"""
cached = self.block_cache.get(header_block_record.header_hash)
if cached is not None:
# Since write to db can fail, we remove from cache here to avoid potential inconsistency
# Adding to cache only from reading
self.block_cache.put(header_block_record.header_hash, None)
if header_block_record.header.foliage_transaction_block is not None:
timestamp = header_block_record.header.foliage_transaction_block.timestamp
else:
timestamp = uint64(0)
cursor = await self.db.execute(
"INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?)",
(
header_block_record.header_hash.hex(),
header_block_record.height,
timestamp,
bytes(header_block_record),
),
)
await cursor.close()
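# weight and total_iters are stored as fixed-width (128-bit) big-endian hex strings,
# presumably so that string comparison in SQLite matches numeric ordering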
cursor_2 = await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?, ?, ?, ?,?)",
(
header_block_record.header.header_hash.hex(),
header_block_record.header.prev_header_hash.hex(),
header_block_record.header.height,
header_block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
header_block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
),
)
await cursor_2.close()
if len(additional_coin_spends) > 0:
blob: bytes = bytes(AdditionalCoinSpends(additional_coin_spends))
cursor_3 = await self.db.execute(
"INSERT OR REPLACE INTO additional_coin_spends VALUES(?, ?)",
(header_block_record.header_hash.hex(), blob),
)
await cursor_3.close()
async def get_header_block_at(self, heights: List[uint32]) -> List[HeaderBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from header_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
cursor = await self.db.execute(formatted_str, heights_db)
rows = await cursor.fetchall()
await cursor.close()
return [HeaderBlock.from_bytes(row[0]) for row in rows]
async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
"""Gets a block record from the database, if present"""
cached = self.block_cache.get(header_hash)
if cached is not None:
return cached
cursor = await self.db.execute("SELECT block from header_blocks WHERE header_hash=?", (header_hash.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
hbr: HeaderBlockRecord = HeaderBlockRecord.from_bytes(row[0])
self.block_cache.put(hbr.header_hash, hbr)
return hbr
else:
return None
async def get_additional_coin_spends(self, header_hash: bytes32) -> Optional[List[CoinSpend]]:
cursor = await self.db.execute(
"SELECT spends_list_blob from additional_coin_spends WHERE header_hash=?", (header_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
coin_spends: AdditionalCoinSpends = AdditionalCoinSpends.from_bytes(row[0])
return coin_spends.coin_spends_list
else:
return None
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
cursor = await self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records(
self,
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks, as well as the header hash of the peak,
if present.
"""
cursor = await self.db.execute("SELECT header_hash, block, is_peak from block_records")
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
peak: Optional[bytes32] = None
for row in rows:
header_hash_bytes, block_record_bytes, is_peak = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
if is_peak:
assert peak is None # Sanity check, only one peak
peak = header_hash
return ret, peak
def rollback_cache_block(self, header_hash: bytes32):
self.block_cache.remove(header_hash)
async def set_peak(self, header_hash: bytes32) -> None:
cursor_1 = await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await cursor_1.close()
cursor_2 = await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(header_hash.hex(),),
)
await cursor_2.close()
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
        Returns a dictionary with the blocks whose height is within blocks_n of the peak,
        as well as the header hash of the peak, if present.
"""
res = await self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, None
header_hash_bytes, peak_height = row
peak: bytes32 = bytes32(bytes.fromhex(header_hash_bytes))
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak_height - blocks_n}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret, peak
async def get_header_blocks_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, HeaderBlock]:
formatted_str = f"SELECT header_hash, block from header_blocks WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, HeaderBlock] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = HeaderBlock.from_bytes(block_record_bytes)
return ret
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
        Returns a dictionary with all blocks in the given height range.
"""
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
cursor = await self.db.execute(formatted_str)
rows = await cursor.fetchall()
await cursor.close()
ret: Dict[bytes32, BlockRecord] = {}
for row in rows:
header_hash_bytes, block_record_bytes = row
header_hash = bytes.fromhex(header_hash_bytes)
ret[header_hash] = BlockRecord.from_bytes(block_record_bytes)
return ret
async def get_peak_heights_dicts(self) -> Tuple[Dict[uint32, bytes32], Dict[uint32, SubEpochSummary]]:
"""
        Returns two dictionaries for the chain ending at the peak: height -> header hash,
        and height -> SubEpochSummary, if a peak is present.
"""
res = await self.db.execute("SELECT header_hash from block_records WHERE is_peak = 1")
row = await res.fetchone()
await res.close()
if row is None:
return {}, {}
peak: bytes32 = bytes.fromhex(row[0])
cursor = await self.db.execute("SELECT header_hash,prev_hash,height,sub_epoch_summary from block_records")
rows = await cursor.fetchall()
await cursor.close()
hash_to_prev_hash: Dict[bytes32, bytes32] = {}
hash_to_height: Dict[bytes32, uint32] = {}
hash_to_summary: Dict[bytes32, SubEpochSummary] = {}
for row in rows:
hash_to_prev_hash[bytes.fromhex(row[0])] = bytes.fromhex(row[1])
hash_to_height[bytes.fromhex(row[0])] = row[2]
if row[3] is not None:
hash_to_summary[bytes.fromhex(row[0])] = SubEpochSummary.from_bytes(row[3])
height_to_hash: Dict[uint32, bytes32] = {}
sub_epoch_summaries: Dict[uint32, SubEpochSummary] = {}
curr_header_hash = peak
curr_height = hash_to_height[curr_header_hash]
while True:
height_to_hash[curr_height] = curr_header_hash
if curr_header_hash in hash_to_summary:
sub_epoch_summaries[curr_height] = hash_to_summary[curr_header_hash]
if curr_height == 0:
break
curr_header_hash = hash_to_prev_hash[curr_header_hash]
curr_height = hash_to_height[curr_header_hash]
return height_to_hash, sub_epoch_summaries
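# Illustrative sketch (not part of the original module): how a caller might wire up
# WalletBlockStore. The database path is a placeholder, and the DBWrapper(connection)
# constructor below is an assumption based on how the wrapper is used elsewhere in
# this package; adjust to your environment.
async def _example_block_store_usage(db_path: str = "wallet_blocks.sqlite"):
    connection = await aiosqlite.connect(db_path)  # hypothetical path
    store = await WalletBlockStore.create(DBWrapper(connection))
    # Fetch any cached block records and the current peak, if one was set.
    records, peak = await store.get_block_records()
    if peak is not None:
        peak_record = records[peak]
        # Header blocks for the last few heights up to the peak.
        headers = await store.get_header_blocks_in_range(
            max(0, peak_record.height - 10), peak_record.height
        )
        print(f"peak height {peak_record.height}, {len(headers)} headers loaded")
    await connection.close()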
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_block_store.py
| 0.871939 | 0.171859 |
wallet_block_store.py
|
pypi
|
from typing import List, Optional
import aiosqlite
from salvia.util.db_wrapper import DBWrapper
from salvia.util.ints import uint32
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet_action import WalletAction
class WalletActionStore:
"""
WalletActionStore keeps track of all wallet actions that require persistence.
Used by Colored coins, Atomic swaps, Rate Limited, and Authorized payee wallets
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS action_queue("
"id INTEGER PRIMARY KEY AUTOINCREMENT,"
" name text,"
" wallet_id int,"
" wallet_type int,"
" wallet_callback text,"
" done int,"
" data text)"
)
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on action_queue(name)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on action_queue(wallet_id)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on action_queue(wallet_type)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM action_queue")
await cursor.close()
await self.db_connection.commit()
async def get_wallet_action(self, id: int) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
async def create_action(
self, name: str, wallet_id: int, type: int, callback: str, done: bool, data: str, in_transaction: bool
):
"""
        Creates a wallet action.
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(None, name, wallet_id, type, callback, done, data),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def action_done(self, action_id: int):
"""
Marks action as done
"""
action: Optional[WalletAction] = await self.get_wallet_action(action_id)
assert action is not None
async with self.db_wrapper.lock:
cursor = await self.db_connection.execute(
"Replace INTO action_queue VALUES(?, ?, ?, ?, ?, ?, ?)",
(
action.id,
action.name,
action.wallet_id,
action.type.value,
action.wallet_callback,
True,
action.data,
),
)
await cursor.close()
await self.db_connection.commit()
async def get_all_pending_actions(self) -> List[WalletAction]:
"""
        Returns a list of all pending actions.
"""
result: List[WalletAction] = []
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE done=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
if rows is None:
return result
for row in rows:
action = WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
result.append(action)
return result
async def get_action_by_id(self, id) -> Optional[WalletAction]:
"""
Return a wallet action by id
"""
cursor = await self.db_connection.execute("SELECT * from action_queue WHERE id=?", (id,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return WalletAction(row[0], row[1], row[2], WalletType(row[3]), row[4], bool(row[5]), row[6])
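# Illustrative sketch (not part of the original module): queueing an action and marking
# it done. The action name, callback name, and data payload below are purely hypothetical;
# DBWrapper(connection) is assumed to wrap an aiosqlite connection as elsewhere in this package.
async def _example_action_store_usage(db_path: str = "wallet_actions.sqlite"):
    connection = await aiosqlite.connect(db_path)  # hypothetical path
    store = await WalletActionStore.create(DBWrapper(connection))
    await store.create_action(
        name="request_puzzle_solution",       # hypothetical action name
        wallet_id=1,
        type=WalletType.STANDARD_WALLET.value,
        callback="puzzle_solution_received",  # hypothetical callback name
        done=False,
        data="{}",
        in_transaction=False,
    )
    pending = await store.get_all_pending_actions()
    for action in pending:
        await store.action_done(action.id)
    await connection.close()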
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_action_store.py
| 0.809088 | 0.299124 |
wallet_action_store.py
|
pypi
|
import asyncio
import logging
from typing import List, Optional, Set, Tuple
import aiosqlite
from blspy import G1Element
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.db_wrapper import DBWrapper
from salvia.util.ints import uint32
from salvia.wallet.derivation_record import DerivationRecord
from salvia.wallet.util.wallet_types import WalletType
log = logging.getLogger(__name__)
class WalletPuzzleStore:
"""
WalletPuzzleStore keeps track of all generated puzzle_hashes and their derivation path / wallet.
This is only used for HD wallets where each address is derived from a public key. Otherwise, use the
WalletInterestedStore to keep track of puzzle hashes which we are interested in.
"""
db_connection: aiosqlite.Connection
lock: asyncio.Lock
cache_size: uint32
all_puzzle_hashes: Set[bytes32]
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.db_connection = self.db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS derivation_paths("
"derivation_index int,"
" pubkey text,"
" puzzle_hash text PRIMARY_KEY,"
" wallet_type int,"
" wallet_id int,"
" used tinyint)"
)
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS derivation_index_index on derivation_paths(derivation_index)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS ph on derivation_paths(puzzle_hash)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS pubkey on derivation_paths(pubkey)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_type on derivation_paths(wallet_type)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS wallet_id on derivation_paths(wallet_id)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS used on derivation_paths(wallet_type)")
await self.db_connection.commit()
# Lock
self.lock = asyncio.Lock() # external
await self._init_cache()
return self
async def close(self):
await self.db_connection.close()
async def _init_cache(self):
self.all_puzzle_hashes = await self.get_all_puzzle_hashes()
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM derivation_paths")
await cursor.close()
await self.db_connection.commit()
async def add_derivation_paths(self, records: List[DerivationRecord], in_transaction=False) -> None:
"""
Insert many derivation paths into the database.
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
sql_records = []
for record in records:
self.all_puzzle_hashes.add(record.puzzle_hash)
sql_records.append(
(
record.index,
bytes(record.pubkey).hex(),
record.puzzle_hash.hex(),
record.wallet_type,
record.wallet_id,
0,
),
)
cursor = await self.db_connection.executemany(
"INSERT OR REPLACE INTO derivation_paths VALUES(?, ?, ?, ?, ?, ?)",
sql_records,
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_derivation_record(self, index: uint32, wallet_id: uint32) -> Optional[DerivationRecord]:
"""
Returns the derivation record by index and wallet id.
"""
cursor = await self.db_connection.execute(
"SELECT * FROM derivation_paths WHERE derivation_index=? and wallet_id=?;",
(
index,
wallet_id,
),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return DerivationRecord(
uint32(row[0]),
bytes32.fromhex(row[2]),
G1Element.from_bytes(bytes.fromhex(row[1])),
WalletType(row[3]),
uint32(row[4]),
)
return None
async def get_derivation_record_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[DerivationRecord]:
"""
        Returns the derivation record for the given puzzle hash.
"""
cursor = await self.db_connection.execute(
"SELECT * FROM derivation_paths WHERE puzzle_hash=?;",
(puzzle_hash.hex(),),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return DerivationRecord(
uint32(row[0]),
bytes32.fromhex(row[2]),
G1Element.from_bytes(bytes.fromhex(row[1])),
WalletType(row[3]),
uint32(row[4]),
)
return None
async def set_used_up_to(self, index: uint32, in_transaction=False) -> None:
"""
        Marks all derivation paths up to and including the given index as used, so they are not reused.
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"UPDATE derivation_paths SET used=1 WHERE derivation_index<=?",
(index,),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def puzzle_hash_exists(self, puzzle_hash: bytes32) -> bool:
"""
Checks if passed puzzle_hash is present in the db.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
return row is not None
async def one_of_puzzle_hashes_exists(self, puzzle_hashes: List[bytes32]) -> bool:
"""
Checks if one of the passed puzzle_hashes is present in the db.
"""
if len(puzzle_hashes) < 1:
return False
for ph in puzzle_hashes:
if ph in self.all_puzzle_hashes:
return True
return False
async def index_for_pubkey(self, pubkey: G1Element) -> Optional[uint32]:
"""
        Returns the derivation index for the given pubkey.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE pubkey=?", (bytes(pubkey).hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def index_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[uint32]:
"""
Returns the derivation path for the puzzle_hash.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def index_for_puzzle_hash_and_wallet(self, puzzle_hash: bytes32, wallet_id: uint32) -> Optional[uint32]:
"""
Returns the derivation path for the puzzle_hash.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=? and wallet_id=?;",
(
puzzle_hash.hex(),
wallet_id,
),
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return uint32(row[0])
return None
async def wallet_info_for_puzzle_hash(self, puzzle_hash: bytes32) -> Optional[Tuple[uint32, WalletType]]:
"""
        Returns the wallet id and wallet type for the puzzle_hash.
Returns None if not present.
"""
cursor = await self.db_connection.execute(
"SELECT * from derivation_paths WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
await cursor.close()
if row is not None:
return row[4], WalletType(row[3])
return None
async def get_all_puzzle_hashes(self) -> Set[bytes32]:
"""
Return a set containing all puzzle_hashes we generated.
"""
cursor = await self.db_connection.execute("SELECT * from derivation_paths")
rows = await cursor.fetchall()
await cursor.close()
result: Set[bytes32] = set()
for row in rows:
result.add(bytes32(bytes.fromhex(row[2])))
return result
async def get_last_derivation_path(self) -> Optional[uint32]:
"""
Returns the last derivation path by derivation_index.
"""
cursor = await self.db_connection.execute("SELECT MAX(derivation_index) FROM derivation_paths;")
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
async def get_last_derivation_path_for_wallet(self, wallet_id: int) -> Optional[uint32]:
"""
Returns the last derivation path by derivation_index.
"""
cursor = await self.db_connection.execute(
f"SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id};"
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
async def get_current_derivation_record_for_wallet(self, wallet_id: uint32) -> Optional[DerivationRecord]:
"""
Returns the current derivation record by derivation_index.
"""
cursor = await self.db_connection.execute(
f"SELECT MAX(derivation_index) FROM derivation_paths WHERE wallet_id={wallet_id} and used=1;"
)
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
index = uint32(row[0])
return await self.get_derivation_record(index, wallet_id)
return None
async def get_unused_derivation_path(self) -> Optional[uint32]:
"""
Returns the first unused derivation path by derivation_index.
"""
cursor = await self.db_connection.execute("SELECT MIN(derivation_index) FROM derivation_paths WHERE used=0;")
row = await cursor.fetchone()
await cursor.close()
if row is not None and row[0] is not None:
return uint32(row[0])
return None
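# Illustrative sketch (not part of the original module): inserting a derivation record and
# querying the next unused index. The throwaway seed and the placeholder puzzle hash below
# are assumptions for demonstration only; DBWrapper(connection) is assumed to wrap an
# aiosqlite connection as elsewhere in this package.
async def _example_puzzle_store_usage(db_path: str = "wallet_puzzles.sqlite"):
    from blspy import AugSchemeMPL  # local import for the sketch only
    connection = await aiosqlite.connect(db_path)  # hypothetical path
    store = await WalletPuzzleStore.create(DBWrapper(connection))
    sk = AugSchemeMPL.key_gen(bytes([0] * 32))  # throwaway key for the example
    pubkey: G1Element = sk.get_g1()
    fake_puzzle_hash = bytes32(bytes(pubkey)[:32])  # placeholder, not a real puzzle hash
    record = DerivationRecord(uint32(0), fake_puzzle_hash, pubkey, WalletType.STANDARD_WALLET, uint32(1))
    await store.add_derivation_paths([record])
    print(await store.puzzle_hash_exists(fake_puzzle_hash))  # True
    print(await store.get_unused_derivation_path())          # 0, since nothing is marked used yet
    await store.close()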
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_puzzle_store.py
| 0.798815 | 0.201912 |
wallet_puzzle_store.py
|
pypi
|
from typing import List, Optional
from blspy import AugSchemeMPL, PrivateKey, G1Element
from salvia.util.ints import uint32
# EIP 2334 bls key derivation
# https://eips.ethereum.org/EIPS/eip-2334
# 12381 = bls spec number
# 8444 = Salvia blockchain number and port number
# key numbers: 0 = farmer, 1 = pool, 2 = wallet, 3 = local, 4 = backup key, 5 = singleton, 6 = pooling authentication
def _derive_path(sk: PrivateKey, path: List[int]) -> PrivateKey:
for index in path:
sk = AugSchemeMPL.derive_child_sk(sk, index)
return sk
def master_sk_to_farmer_sk(master: PrivateKey) -> PrivateKey:
return _derive_path(master, [12381, 8444, 0, 0])
def master_sk_to_pool_sk(master: PrivateKey) -> PrivateKey:
return _derive_path(master, [12381, 8444, 1, 0])
def master_sk_to_wallet_sk(master: PrivateKey, index: uint32) -> PrivateKey:
return _derive_path(master, [12381, 8444, 2, index])
def master_sk_to_local_sk(master: PrivateKey) -> PrivateKey:
return _derive_path(master, [12381, 8444, 3, 0])
def master_sk_to_backup_sk(master: PrivateKey) -> PrivateKey:
return _derive_path(master, [12381, 8444, 4, 0])
def master_sk_to_singleton_owner_sk(master: PrivateKey, wallet_id: uint32) -> PrivateKey:
"""
This key controls a singleton on the blockchain, allowing for dynamic pooling (changing pools)
"""
return _derive_path(master, [12381, 8444, 5, wallet_id])
def master_sk_to_pooling_authentication_sk(master: PrivateKey, wallet_id: uint32, index: uint32) -> PrivateKey:
"""
This key is used for the farmer to authenticate to the pool when sending partials
"""
assert index < 10000
assert wallet_id < 10000
return _derive_path(master, [12381, 8444, 6, wallet_id * 10000 + index])
async def find_owner_sk(all_sks: List[PrivateKey], owner_pk: G1Element) -> Optional[PrivateKey]:
for wallet_id in range(50):
for sk in all_sks:
auth_sk = master_sk_to_singleton_owner_sk(sk, uint32(wallet_id))
if auth_sk.get_g1() == owner_pk:
return auth_sk
return None
async def find_authentication_sk(all_sks: List[PrivateKey], authentication_pk: G1Element) -> Optional[PrivateKey]:
# NOTE: might need to increase this if using a large number of wallets, or have switched authentication keys
# many times.
for auth_key_index in range(20):
for wallet_id in range(20):
for sk in all_sks:
auth_sk = master_sk_to_pooling_authentication_sk(sk, uint32(wallet_id), uint32(auth_key_index))
if auth_sk.get_g1() == authentication_pk:
return auth_sk
return None
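# Illustrative sketch (not part of the original module): deriving the standard keys from a
# master key following the EIP 2334 paths above. The seed below is a throwaway value used
# only for demonstration.
def _example_key_derivation() -> None:
    master: PrivateKey = AugSchemeMPL.key_gen(bytes([7] * 32))  # hypothetical seed
    farmer_sk = master_sk_to_farmer_sk(master)                   # m/12381/8444/0/0
    pool_sk = master_sk_to_pool_sk(master)                       # m/12381/8444/1/0
    first_wallet_sk = master_sk_to_wallet_sk(master, uint32(0))  # m/12381/8444/2/0
    print(bytes(farmer_sk.get_g1()).hex())
    print(bytes(pool_sk.get_g1()).hex())
    print(bytes(first_wallet_sk.get_g1()).hex())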
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/derive_keys.py
| 0.911087 | 0.577793 |
derive_keys.py
|
pypi
|
from dataclasses import dataclass
from typing import List, Optional, Tuple
from salvia.consensus.coinbase import pool_parent_id, farmer_parent_id
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.types.spend_bundle import SpendBundle
from salvia.util.ints import uint8, uint32, uint64
from salvia.util.streamable import Streamable, streamable
from salvia.wallet.util.transaction_type import TransactionType
@dataclass(frozen=True)
@streamable
class TransactionRecord(Streamable):
"""
Used for storing transaction data and status in wallets.
"""
confirmed_at_height: uint32
created_at_time: uint64
to_puzzle_hash: bytes32
amount: uint64
fee_amount: uint64
confirmed: bool
sent: uint32
spend_bundle: Optional[SpendBundle]
additions: List[Coin]
removals: List[Coin]
wallet_id: uint32
# Represents the list of peers that we sent the transaction to, whether each one
# included it in the mempool, and what the error message (if any) was
sent_to: List[Tuple[str, uint8, Optional[str]]]
trade_id: Optional[bytes32]
type: uint32 # TransactionType
name: bytes32
def is_in_mempool(self) -> bool:
# If one of the nodes we sent it to responded with success, we set it to success
for (_, mis, _) in self.sent_to:
if MempoolInclusionStatus(mis) == MempoolInclusionStatus.SUCCESS:
return True
# Note, transactions pending inclusion (pending) return false
return False
def height_farmed(self, genesis_challenge: bytes32) -> Optional[uint32]:
if not self.confirmed:
return None
if self.type == TransactionType.FEE_REWARD or self.type == TransactionType.COINBASE_REWARD:
for block_index in range(self.confirmed_at_height, self.confirmed_at_height - 100, -1):
if block_index < 0:
return None
pool_parent = pool_parent_id(uint32(block_index), genesis_challenge)
farmer_parent = farmer_parent_id(uint32(block_index), genesis_challenge)
if pool_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
if farmer_parent == self.additions[0].parent_coin_info:
return uint32(block_index)
return None
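# Illustrative sketch (not part of the original module): constructing a minimal outgoing
# TransactionRecord with placeholder values and checking its mempool status from the
# sent_to acknowledgements. All values below are dummies.
def _example_transaction_record() -> None:
    zero_hash = bytes32(bytes(32))
    record = TransactionRecord(
        confirmed_at_height=uint32(0),
        created_at_time=uint64(0),
        to_puzzle_hash=zero_hash,
        amount=uint64(1000),
        fee_amount=uint64(1),
        confirmed=False,
        sent=uint32(1),
        spend_bundle=None,
        additions=[],
        removals=[],
        wallet_id=uint32(1),
        sent_to=[("example-node-id", uint8(MempoolInclusionStatus.SUCCESS.value), None)],
        trade_id=None,
        type=uint32(TransactionType.OUTGOING_TX.value),
        name=zero_hash,
    )
    print(record.is_in_mempool())  # True, since one peer reported SUCCESS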
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/transaction_record.py
| 0.868381 | 0.45042 |
transaction_record.py
|
pypi
|
from salvia.protocols import full_node_protocol, introducer_protocol, wallet_protocol
from salvia.server.outbound_message import NodeType
from salvia.server.ws_connection import WSSalviaConnection
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.util.api_decorators import api_request, peer_required, execute_task
from salvia.util.errors import Err
from salvia.wallet.wallet_node import WalletNode
class WalletNodeAPI:
wallet_node: WalletNode
def __init__(self, wallet_node) -> None:
self.wallet_node = wallet_node
@property
def log(self):
return self.wallet_node.log
@property
def api_ready(self):
return self.wallet_node.logged_in
@peer_required
@api_request
async def respond_removals(self, response: wallet_protocol.RespondRemovals, peer: WSSalviaConnection):
pass
async def reject_removals_request(self, response: wallet_protocol.RejectRemovalsRequest, peer: WSSalviaConnection):
"""
The full node has rejected our request for removals.
"""
pass
@api_request
async def reject_additions_request(self, response: wallet_protocol.RejectAdditionsRequest):
"""
The full node has rejected our request for additions.
"""
pass
@execute_task
@peer_required
@api_request
async def new_peak_wallet(self, peak: wallet_protocol.NewPeakWallet, peer: WSSalviaConnection):
"""
        The full node sent us a new peak.
"""
await self.wallet_node.new_peak_wallet(peak, peer)
@api_request
async def reject_block_header(self, response: wallet_protocol.RejectHeaderRequest):
"""
The full node has rejected our request for a header.
"""
pass
@api_request
async def respond_block_header(self, response: wallet_protocol.RespondBlockHeader):
pass
@peer_required
@api_request
async def respond_additions(self, response: wallet_protocol.RespondAdditions, peer: WSSalviaConnection):
pass
@api_request
async def respond_proof_of_weight(self, response: full_node_protocol.RespondProofOfWeight):
pass
@peer_required
@api_request
async def transaction_ack(self, ack: wallet_protocol.TransactionAck, peer: WSSalviaConnection):
"""
This is an ack for our previous SendTransaction call. This removes the transaction from
the send queue if we have sent it to enough nodes.
"""
assert peer.peer_node_id is not None
name = peer.peer_node_id.hex()
status = MempoolInclusionStatus(ack.status)
if self.wallet_node.wallet_state_manager is None or self.wallet_node.backup_initialized is False:
return None
if status == MempoolInclusionStatus.SUCCESS:
self.wallet_node.log.info(f"SpendBundle has been received and accepted to mempool by the FullNode. {ack}")
elif status == MempoolInclusionStatus.PENDING:
self.wallet_node.log.info(f"SpendBundle has been received (and is pending) by the FullNode. {ack}")
else:
self.wallet_node.log.warning(f"SpendBundle has been rejected by the FullNode. {ack}")
if ack.error is not None:
await self.wallet_node.wallet_state_manager.remove_from_queue(ack.txid, name, status, Err[ack.error])
else:
await self.wallet_node.wallet_state_manager.remove_from_queue(ack.txid, name, status, None)
@peer_required
@api_request
async def respond_peers_introducer(
self, request: introducer_protocol.RespondPeersIntroducer, peer: WSSalviaConnection
):
if not self.wallet_node.has_full_node():
await self.wallet_node.wallet_peers.respond_peers(request, peer.get_peer_info(), False)
else:
await self.wallet_node.wallet_peers.ensure_is_closed()
if peer is not None and peer.connection_type is NodeType.INTRODUCER:
await peer.close()
@peer_required
@api_request
async def respond_peers(self, request: full_node_protocol.RespondPeers, peer: WSSalviaConnection):
if not self.wallet_node.has_full_node():
self.log.info(f"Wallet received {len(request.peer_list)} peers.")
await self.wallet_node.wallet_peers.respond_peers(request, peer.get_peer_info(), True)
else:
self.log.info(f"Wallet received {len(request.peer_list)} peers, but ignoring, since we have a full node.")
await self.wallet_node.wallet_peers.ensure_is_closed()
return None
@api_request
async def respond_puzzle_solution(self, request: wallet_protocol.RespondPuzzleSolution):
if self.wallet_node.wallet_state_manager is None or self.wallet_node.backup_initialized is False:
return None
await self.wallet_node.wallet_state_manager.puzzle_solution_received(request)
@api_request
async def reject_puzzle_solution(self, request: wallet_protocol.RejectPuzzleSolution):
self.log.warning(f"Reject puzzle solution: {request}")
@api_request
async def respond_header_blocks(self, request: wallet_protocol.RespondHeaderBlocks):
pass
@api_request
async def reject_header_blocks(self, request: wallet_protocol.RejectHeaderBlocks):
self.log.warning(f"Reject header blocks: {request}")
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_node_api.py
| 0.714827 | 0.268633 |
wallet_node_api.py
|
pypi
|
from typing import List, Tuple, Optional
import aiosqlite
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.db_wrapper import DBWrapper
class WalletInterestedStore:
"""
Stores coin ids that we are interested in receiving
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, wrapper: DBWrapper):
self = cls()
self.db_connection = wrapper.db
self.db_wrapper = wrapper
await self.db_connection.execute("CREATE TABLE IF NOT EXISTS interested_coins(coin_name text PRIMARY KEY)")
await self.db_connection.execute(
"CREATE TABLE IF NOT EXISTS interested_puzzle_hashes(puzzle_hash text PRIMARY KEY, wallet_id integer)"
)
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM puzzle_hashes")
await cursor.close()
cursor = await self.db_connection.execute("DELETE FROM interested_coins")
await cursor.close()
await self.db_connection.commit()
async def get_interested_coin_ids(self) -> List[bytes32]:
cursor = await self.db_connection.execute("SELECT coin_name FROM interested_coins")
rows_hex = await cursor.fetchall()
return [bytes32(bytes.fromhex(row[0])) for row in rows_hex]
async def add_interested_coin_id(self, coin_id: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_coins VALUES (?)", (coin_id.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def get_interested_puzzle_hashes(self) -> List[Tuple[bytes32, int]]:
cursor = await self.db_connection.execute("SELECT puzzle_hash, wallet_id FROM interested_puzzle_hashes")
rows_hex = await cursor.fetchall()
return [(bytes32(bytes.fromhex(row[0])), row[1]) for row in rows_hex]
async def get_interested_puzzle_hash_wallet_id(self, puzzle_hash: bytes32) -> Optional[int]:
cursor = await self.db_connection.execute(
"SELECT wallet_id FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
row = await cursor.fetchone()
if row is None:
return None
return row[0]
async def add_interested_puzzle_hash(
self, puzzle_hash: bytes32, wallet_id: int, in_transaction: bool = False
) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO interested_puzzle_hashes VALUES (?, ?)", (puzzle_hash.hex(), wallet_id)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def remove_interested_puzzle_hash(self, puzzle_hash: bytes32, in_transaction: bool = False) -> None:
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"DELETE FROM interested_puzzle_hashes WHERE puzzle_hash=?", (puzzle_hash.hex(),)
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
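# Illustrative sketch (not part of the original module): registering a puzzle hash of
# interest and reading it back. The puzzle hash below is a placeholder value, and
# DBWrapper(connection) is assumed to wrap an aiosqlite connection as elsewhere in this package.
async def _example_interested_store_usage(db_path: str = "wallet_interested.sqlite"):
    connection = await aiosqlite.connect(db_path)  # hypothetical path
    store = await WalletInterestedStore.create(DBWrapper(connection))
    placeholder_ph = bytes32(bytes(32))
    await store.add_interested_puzzle_hash(placeholder_ph, wallet_id=1)
    print(await store.get_interested_puzzle_hash_wallet_id(placeholder_ph))  # 1
    print(await store.get_interested_puzzle_hashes())  # [(placeholder_ph, 1)]
    await connection.close()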
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet_interested_store.py
| 0.801354 | 0.172503 |
wallet_interested_store.py
|
pypi
|
import inspect
from typing import List, Any
import blspy
from blspy import AugSchemeMPL
from salvia.types.coin_spend import CoinSpend
from salvia.types.spend_bundle import SpendBundle
from salvia.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
async def sign_coin_spends(
coin_spends: List[CoinSpend],
secret_key_for_public_key_f: Any, # Potentially awaitable function from G1Element => Optional[PrivateKey]
additional_data: bytes,
max_cost: int,
) -> SpendBundle:
signatures: List[blspy.G2Element] = []
pk_list: List[blspy.G1Element] = []
msg_list: List[bytes] = []
for coin_spend in coin_spends:
# Get AGG_SIG conditions
err, conditions_dict, cost = conditions_dict_for_solution(
coin_spend.puzzle_reveal, coin_spend.solution, max_cost
)
if err or conditions_dict is None:
error_msg = f"Sign transaction failed, con:{conditions_dict}, error: {err}"
raise ValueError(error_msg)
# Create signature
for pk_bytes, msg in pkm_pairs_for_conditions_dict(
conditions_dict, bytes(coin_spend.coin.name()), additional_data
):
pk = blspy.G1Element.from_bytes(pk_bytes)
pk_list.append(pk)
msg_list.append(msg)
if inspect.iscoroutinefunction(secret_key_for_public_key_f):
secret_key = await secret_key_for_public_key_f(pk)
else:
secret_key = secret_key_for_public_key_f(pk)
if secret_key is None:
e_msg = f"no secret key for {pk}"
raise ValueError(e_msg)
assert bytes(secret_key.get_g1()) == bytes(pk)
signature = AugSchemeMPL.sign(secret_key, msg)
assert AugSchemeMPL.verify(pk, msg, signature)
signatures.append(signature)
# Aggregate signatures
aggsig = AugSchemeMPL.aggregate(signatures)
assert AugSchemeMPL.aggregate_verify(pk_list, msg_list, aggsig)
return SpendBundle(coin_spends, aggsig)
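# Illustrative sketch (not part of the original module): a simple key lookup suitable for
# the `secret_key_for_public_key_f` parameter above. It maps each known public key to its
# private key; the helper name is hypothetical.
def make_secret_key_lookup(secret_keys: List[blspy.PrivateKey]):
    from typing import Optional  # local import for the sketch only
    by_public_key = {bytes(sk.get_g1()): sk for sk in secret_keys}
    def lookup(public_key: blspy.G1Element) -> Optional[blspy.PrivateKey]:
        # Returns None when the key is unknown, which makes sign_coin_spends
        # raise the "no secret key" error above.
        return by_public_key.get(bytes(public_key))
    return lookup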
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/sign_coin_spends.py
| 0.628407 | 0.244543 |
sign_coin_spends.py
|
pypi
|
import logging
import time
from typing import Any, Dict, List, Optional, Set
from blspy import G1Element
from salvia.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from salvia.full_node.bundle_tools import simple_solution_generator
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program, SerializedProgram
from salvia.types.announcement import Announcement
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_spend import CoinSpend
from salvia.types.generator_types import BlockGenerator
from salvia.types.spend_bundle import SpendBundle
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.hash import std_hash
from salvia.wallet.derivation_record import DerivationRecord
from salvia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
puzzle_for_pk,
solution_for_conditions,
)
from salvia.wallet.puzzles.puzzle_utils import (
make_assert_coin_announcement,
make_assert_puzzle_announcement,
make_assert_my_coin_id_condition,
make_assert_absolute_seconds_exceeds_condition,
make_create_coin_announcement,
make_create_puzzle_announcement,
make_create_coin_condition,
make_reserve_fee_condition,
)
from salvia.wallet.secret_key_store import SecretKeyStore
from salvia.wallet.sign_coin_spends import sign_coin_spends
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.transaction_type import TransactionType
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet_coin_record import WalletCoinRecord
from salvia.wallet.wallet_info import WalletInfo
class Wallet:
wallet_state_manager: Any
log: logging.Logger
wallet_id: uint32
secret_key_store: SecretKeyStore
cost_of_single_tx: Optional[int]
@staticmethod
async def create(
wallet_state_manager: Any,
info: WalletInfo,
name: str = None,
):
self = Wallet()
self.log = logging.getLogger(name if name else __name__)
self.wallet_state_manager = wallet_state_manager
self.wallet_id = info.id
self.secret_key_store = SecretKeyStore()
self.cost_of_single_tx = None
return self
async def get_max_send_amount(self, records=None):
spendable: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records)
)
if len(spendable) == 0:
return 0
spendable.sort(reverse=True, key=lambda record: record.coin.amount)
if self.cost_of_single_tx is None:
coin = spendable[0].coin
tx = await self.generate_signed_transaction(
coin.amount, coin.puzzle_hash, coins={coin}, ignore_max_send_amount=True
)
program: BlockGenerator = simple_solution_generator(tx.spend_bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(
program,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.wallet_state_manager.constants.COST_PER_BYTE,
safe_mode=True,
)
cost_result: uint64 = calculate_cost_of_program(
program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE
)
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 5 # avoid full block TXs
current_cost = 0
total_amount = 0
total_coin_count = 0
for record in spendable:
current_cost += self.cost_of_single_tx
total_amount += record.coin.amount
total_coin_count += 1
if current_cost + self.cost_of_single_tx > max_cost:
break
return total_amount
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.STANDARD_WALLET)
def id(self) -> uint32:
return self.wallet_id
async def get_confirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_confirmed_balance_for_wallet(self.id(), unspent_records)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_unconfirmed_balance(self.id(), unspent_records)
async def get_spendable_balance(self, unspent_records=None) -> uint128:
spendable = await self.wallet_state_manager.get_confirmed_spendable_balance_for_wallet(
self.id(), unspent_records
)
return spendable
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
self.log.warning(f"Record: {record} not in mempool")
continue
our_spend = False
for coin in record.removals:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
def puzzle_for_pk(self, pubkey: bytes) -> Program:
return puzzle_for_pk(pubkey)
async def hack_populate_secret_key_for_puzzle_hash(self, puzzle_hash: bytes32) -> G1Element:
maybe = await self.wallet_state_manager.get_keys(puzzle_hash)
if maybe is None:
error_msg = f"Wallet couldn't find keys for puzzle_hash {puzzle_hash}"
self.log.error(error_msg)
raise ValueError(error_msg)
# Get puzzle for pubkey
public_key, secret_key = maybe
# HACK
synthetic_secret_key = calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)
self.secret_key_store.save_secret_key(synthetic_secret_key)
return public_key
async def hack_populate_secret_keys_for_coin_spends(self, coin_spends: List[CoinSpend]) -> None:
"""
This hack forces secret keys into the `_pk2sk` lookup. This should eventually be replaced
by a persistent DB table that can do this look-up directly.
"""
for coin_spend in coin_spends:
await self.hack_populate_secret_key_for_puzzle_hash(coin_spend.coin.puzzle_hash)
async def puzzle_for_puzzle_hash(self, puzzle_hash: bytes32) -> Program:
public_key = await self.hack_populate_secret_key_for_puzzle_hash(puzzle_hash)
return puzzle_for_pk(bytes(public_key))
async def get_new_puzzle(self) -> Program:
dr = await self.wallet_state_manager.get_unused_derivation_record(self.id())
return puzzle_for_pk(bytes(dr.pubkey))
async def get_puzzle_hash(self, new: bool) -> bytes32:
if new:
return await self.get_new_puzzlehash()
else:
record: Optional[
DerivationRecord
] = await self.wallet_state_manager.get_current_derivation_record_for_wallet(self.id())
if record is None:
return await self.get_new_puzzlehash()
return record.puzzle_hash
async def get_new_puzzlehash(self, in_transaction: bool = False) -> bytes32:
return (await self.wallet_state_manager.get_unused_derivation_record(self.id(), in_transaction)).puzzle_hash
def make_solution(
self,
primaries: Optional[List[Dict[str, Any]]] = None,
min_time=0,
me=None,
coin_announcements: Optional[Set[bytes32]] = None,
coin_announcements_to_assert: Optional[Set[bytes32]] = None,
puzzle_announcements: Optional[Set[bytes32]] = None,
puzzle_announcements_to_assert: Optional[Set[bytes32]] = None,
fee=0,
) -> Program:
assert fee >= 0
condition_list = []
if primaries:
for primary in primaries:
condition_list.append(make_create_coin_condition(primary["puzzlehash"], primary["amount"]))
if min_time > 0:
condition_list.append(make_assert_absolute_seconds_exceeds_condition(min_time))
if me:
condition_list.append(make_assert_my_coin_id_condition(me["id"]))
if fee:
condition_list.append(make_reserve_fee_condition(fee))
if coin_announcements:
for announcement in coin_announcements:
condition_list.append(make_create_coin_announcement(announcement))
if coin_announcements_to_assert:
for announcement_hash in coin_announcements_to_assert:
condition_list.append(make_assert_coin_announcement(announcement_hash))
if puzzle_announcements:
for announcement in puzzle_announcements:
condition_list.append(make_create_puzzle_announcement(announcement))
if puzzle_announcements_to_assert:
for announcement_hash in puzzle_announcements_to_assert:
condition_list.append(make_assert_puzzle_announcement(announcement_hash))
return solution_for_conditions(condition_list)
async def select_coins(self, amount, exclude: List[Coin] = None) -> Set[Coin]:
"""
Returns a set of coins that can be used for generating a new transaction.
Note: This must be called under a wallet state manager lock
"""
if exclude is None:
exclude = []
spendable_amount = await self.get_spendable_balance()
if amount > spendable_amount:
error_msg = (
f"Can't select amount higher than our spendable balance. Amount: {amount}, spendable: "
f" {spendable_amount}"
)
self.log.warning(error_msg)
raise ValueError(error_msg)
self.log.info(f"About to select coins for amount {amount}")
unspent: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id())
)
sum_value = 0
used_coins: Set = set()
        # Use larger coins first (sorted by amount, descending)
unspent.sort(reverse=True, key=lambda r: r.coin.amount)
# Try to use coins from the store, if there isn't enough of "unused"
# coins use change coins that are not confirmed yet
unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet(
self.id()
)
for coinrecord in unspent:
if sum_value >= amount and len(used_coins) > 0:
break
if coinrecord.coin.name() in unconfirmed_removals:
continue
if coinrecord.coin in exclude:
continue
sum_value += coinrecord.coin.amount
used_coins.add(coinrecord.coin)
self.log.debug(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!")
# This happens when we couldn't use one of the coins because it's already used
# but unconfirmed, and we are waiting for the change. (unconfirmed_additions)
if sum_value < amount:
raise ValueError(
"Can't make this transaction at the moment. Waiting for the change from the previous transaction."
)
self.log.debug(f"Successfully selected coins: {used_coins}")
return used_coins
async def _generate_unsigned_transaction(
self,
amount: uint64,
newpuzzlehash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries_input: Optional[List[Dict[str, Any]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> List[CoinSpend]:
"""
        Generates an unsigned transaction as a list of CoinSpends (puzzle and solution pairs).
Note: this must be called under a wallet state manager lock
"""
if primaries_input is None:
primaries: Optional[List[Dict]] = None
total_amount = amount + fee
else:
primaries = primaries_input.copy()
primaries_amount = 0
for prim in primaries:
primaries_amount += prim["amount"]
total_amount = amount + fee + primaries_amount
if not ignore_max_send_amount:
max_send = await self.get_max_send_amount()
if total_amount > max_send:
raise ValueError(f"Can't send more than {max_send} in a single transaction")
if coins is None:
coins = await self.select_coins(total_amount)
assert len(coins) > 0
self.log.info(f"coins is not None {coins}")
spend_value = sum([coin.amount for coin in coins])
change = spend_value - total_amount
assert change >= 0
spends: List[CoinSpend] = []
primary_announcement_hash: Optional[bytes32] = None
# Check for duplicates
if primaries is not None:
all_primaries_list = [(p["puzzlehash"], p["amount"]) for p in primaries] + [(newpuzzlehash, amount)]
if len(set(all_primaries_list)) != len(all_primaries_list):
raise ValueError("Cannot create two identical coins")
for coin in coins:
self.log.info(f"coin from coins {coin}")
puzzle: Program = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
# Only one coin creates outputs
if primary_announcement_hash is None and origin_id in (None, coin.name()):
if primaries is None:
primaries = [{"puzzlehash": newpuzzlehash, "amount": amount}]
else:
primaries.append({"puzzlehash": newpuzzlehash, "amount": amount})
if change > 0:
change_puzzle_hash: bytes32 = await self.get_new_puzzlehash()
primaries.append({"puzzlehash": change_puzzle_hash, "amount": change})
message_list: List[bytes32] = [c.name() for c in coins]
for primary in primaries:
message_list.append(Coin(coin.name(), primary["puzzlehash"], primary["amount"]).name())
message: bytes32 = std_hash(b"".join(message_list))
solution: Program = self.make_solution(
primaries=primaries,
fee=fee,
coin_announcements={message},
coin_announcements_to_assert=announcements_to_consume,
)
primary_announcement_hash = Announcement(coin.name(), message).name()
else:
solution = self.make_solution(coin_announcements_to_assert={primary_announcement_hash})
spends.append(
CoinSpend(
coin, SerializedProgram.from_bytes(bytes(puzzle)), SerializedProgram.from_bytes(bytes(solution))
)
)
self.log.info(f"Spends is {spends}")
return spends
async def sign_transaction(self, coin_spends: List[CoinSpend]) -> SpendBundle:
return await sign_coin_spends(
coin_spends,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
async def generate_signed_transaction(
self,
amount: uint64,
puzzle_hash: bytes32,
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
primaries: Optional[List[Dict[str, bytes32]]] = None,
ignore_max_send_amount: bool = False,
announcements_to_consume: Set[Announcement] = None,
) -> TransactionRecord:
"""
        Use this to generate a signed transaction.
Note: this must be called under a wallet state manager lock
"""
if primaries is None:
non_change_amount = amount
else:
non_change_amount = uint64(amount + sum(p["amount"] for p in primaries))
transaction = await self._generate_unsigned_transaction(
amount, puzzle_hash, fee, origin_id, coins, primaries, ignore_max_send_amount, announcements_to_consume
)
assert len(transaction) > 0
self.log.info("About to sign a transaction")
await self.hack_populate_secret_keys_for_coin_spends(transaction)
spend_bundle: SpendBundle = await sign_coin_spends(
transaction,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
now = uint64(int(time.time()))
add_list: List[Coin] = list(spend_bundle.additions())
rem_list: List[Coin] = list(spend_bundle.removals())
assert sum(a.amount for a in add_list) + fee == sum(r.amount for r in rem_list)
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=puzzle_hash,
amount=uint64(non_change_amount),
fee_amount=uint64(fee),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=add_list,
removals=rem_list,
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def push_transaction(self, tx: TransactionRecord) -> None:
"""Use this API to send transactions."""
await self.wallet_state_manager.add_pending_transaction(tx)
# This is to be aggregated together with a coloured coin offer to ensure that the trade happens
async def create_spend_bundle_relative_salvia(self, salvia_amount: int, exclude: List[Coin]) -> SpendBundle:
list_of_solutions = []
utxos = None
# If we're losing value then get coins with at least that much value
# If we're gaining value then our amount doesn't matter
if salvia_amount < 0:
utxos = await self.select_coins(abs(salvia_amount), exclude)
else:
utxos = await self.select_coins(0, exclude)
assert len(utxos) > 0
# Calculate output amount given sum of utxos
spend_value = sum([coin.amount for coin in utxos])
salvia_amount = spend_value + salvia_amount
# Create coin solutions for each utxo
output_created = None
for coin in utxos:
puzzle = await self.puzzle_for_puzzle_hash(coin.puzzle_hash)
if output_created is None:
newpuzhash = await self.get_new_puzzlehash()
primaries = [{"puzzlehash": newpuzhash, "amount": salvia_amount}]
solution = self.make_solution(primaries=primaries)
output_created = coin
list_of_solutions.append(CoinSpend(coin, puzzle, solution))
await self.hack_populate_secret_keys_for_coin_spends(list_of_solutions)
spend_bundle = await sign_coin_spends(
list_of_solutions,
self.secret_key_store.secret_key_for_public_key,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
return spend_bundle
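# Illustrative sketch (not part of the original module): the typical call pattern for
# sending funds with the standard wallet. It assumes an already initialised Wallet whose
# wallet_state_manager exposes an asyncio lock (assumption), since the docstrings above
# require these calls to happen under the wallet state manager lock.
async def _example_send_payment(wallet: Wallet, recipient_puzzle_hash: bytes32) -> TransactionRecord:
    async with wallet.wallet_state_manager.lock:  # assumed lock attribute on the state manager
        tx: TransactionRecord = await wallet.generate_signed_transaction(
            amount=uint64(1000),
            puzzle_hash=recipient_puzzle_hash,
            fee=uint64(1),
        )
        await wallet.push_transaction(tx)
    return tx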
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/wallet.py
| 0.763131 | 0.415847 |
wallet.py
|
pypi
|
import logging
import time
import traceback
from pathlib import Path
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Tuple
from blspy import AugSchemeMPL
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.spend_bundle import SpendBundle
from salvia.types.coin_spend import CoinSpend
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.db_wrapper import DBWrapper
from salvia.util.hash import std_hash
from salvia.util.ints import uint32, uint64
from salvia.wallet.cc_wallet import cc_utils
from salvia.wallet.cc_wallet.cc_utils import CC_MOD, SpendableCC, spend_bundle_for_spendable_ccs, uncurry_cc
from salvia.wallet.cc_wallet.cc_wallet import CCWallet
from salvia.wallet.puzzles.genesis_by_coin_id_with_0 import genesis_coin_id_for_genesis_coin_checker
from salvia.wallet.trade_record import TradeRecord
from salvia.wallet.trading.trade_status import TradeStatus
from salvia.wallet.trading.trade_store import TradeStore
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.trade_utils import (
get_discrepancies_for_spend_bundle,
get_output_amount_for_puzzle_and_solution,
get_output_discrepancy_for_puzzle_and_solution,
)
from salvia.wallet.util.transaction_type import TransactionType
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet import Wallet
from salvia.wallet.wallet_coin_record import WalletCoinRecord
class TradeManager:
wallet_state_manager: Any
log: logging.Logger
trade_store: TradeStore
@staticmethod
async def create(
wallet_state_manager: Any,
db_wrapper: DBWrapper,
name: str = None,
):
self = TradeManager()
if name:
self.log = logging.getLogger(name)
else:
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.trade_store = await TradeStore.create(db_wrapper)
return self
async def get_offers_with_status(self, status: TradeStatus) -> List[TradeRecord]:
records = await self.trade_store.get_trade_record_with_status(status)
return records
async def get_coins_of_interest(
self,
) -> Tuple[Dict[bytes32, Coin], Dict[bytes32, Coin]]:
"""
        Returns the coins we want to check for inclusion in the filter.
        These include coins that belong to us and coins on the other side of the trade.
"""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
removals = {}
additions = {}
for trade in all_pending:
for coin in trade.removals:
removals[coin.name()] = coin
for coin in trade.additions:
additions[coin.name()] = coin
return removals, additions
async def get_trade_by_coin(self, coin: Coin) -> Optional[TradeRecord]:
all_trades = await self.get_all_trades()
for trade in all_trades:
if trade.status == TradeStatus.CANCELED.value:
continue
if coin in trade.removals:
return trade
if coin in trade.additions:
return trade
return None
async def coins_of_interest_farmed(self, removals: List[Coin], additions: List[Coin], height: uint32):
"""
        If both our coins and the other party's coins in the trade were removed, the trade was successfully executed.
        If the coins from the other side of the trade were farmed without ours, the trade failed because either
        someone else completed the trade or the other party canceled it by making a spend.
        If our coins were farmed but the other side's were not, we successfully canceled the trade by spending our inputs.
"""
removal_dict = {}
addition_dict = {}
checked: Dict[bytes32, Coin] = {}
for coin in removals:
removal_dict[coin.name()] = coin
for coin in additions:
addition_dict[coin.name()] = coin
all_coins = []
all_coins.extend(removals)
all_coins.extend(additions)
for coin in all_coins:
if coin.name() in checked:
continue
trade = await self.get_trade_by_coin(coin)
if trade is None:
self.log.error(f"Coin: {Coin}, not in any trade")
continue
# Check if all coins that are part of the trade got farmed
# If coin is missing, trade failed
failed = False
for removed_coin in trade.removals:
if removed_coin.name() not in removal_dict:
self.log.error(f"{removed_coin} from trade not removed")
failed = True
checked[removed_coin.name()] = removed_coin
for added_coin in trade.additions:
if added_coin.name() not in addition_dict:
self.log.error(f"{added_coin} from trade not added")
failed = True
                checked[added_coin.name()] = added_coin
if failed is False:
# Mark this trade as successful
await self.trade_store.set_status(trade.trade_id, TradeStatus.CONFIRMED, True, height)
self.log.info(f"Trade with id: {trade.trade_id} confirmed at height: {height}")
else:
# Either we canceled this trade or this trade failed
if trade.status == TradeStatus.PENDING_CANCEL.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.CANCELED, True)
self.log.info(f"Trade with id: {trade.trade_id} canceled at height: {height}")
elif trade.status == TradeStatus.PENDING_CONFIRM.value:
await self.trade_store.set_status(trade.trade_id, TradeStatus.FAILED, True)
self.log.warning(f"Trade with id: {trade.trade_id} failed at height: {height}")
async def get_locked_coins(self, wallet_id: int = None) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a dictionary of confirmed coins that are locked by a trade."""
all_pending = []
pending_accept = await self.get_offers_with_status(TradeStatus.PENDING_ACCEPT)
pending_confirm = await self.get_offers_with_status(TradeStatus.PENDING_CONFIRM)
pending_cancel = await self.get_offers_with_status(TradeStatus.PENDING_CANCEL)
all_pending.extend(pending_accept)
all_pending.extend(pending_confirm)
all_pending.extend(pending_cancel)
if len(all_pending) == 0:
return {}
result = {}
for trade_offer in all_pending:
if trade_offer.tx_spend_bundle is None:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.spend_bundle)
else:
locked = await self.get_locked_coins_in_spend_bundle(trade_offer.tx_spend_bundle)
for name, record in locked.items():
if wallet_id is None or record.wallet_id == wallet_id:
result[name] = record
return result
async def get_all_trades(self):
all: List[TradeRecord] = await self.trade_store.get_all_trades()
return all
async def get_trade_by_id(self, trade_id: bytes) -> Optional[TradeRecord]:
record = await self.trade_store.get_trade_record(trade_id)
return record
async def get_locked_coins_in_spend_bundle(self, bundle: SpendBundle) -> Dict[bytes32, WalletCoinRecord]:
"""Returns a list of coin records that are used in this SpendBundle"""
result = {}
removals = bundle.removals()
for coin in removals:
coin_record = await self.wallet_state_manager.coin_store.get_coin_record(coin.name())
if coin_record is None:
continue
result[coin_record.name()] = coin_record
return result
async def cancel_pending_offer(self, trade_id: bytes32):
await self.trade_store.set_status(trade_id, TradeStatus.CANCELED, False)
async def cancel_pending_offer_safely(self, trade_id: bytes32):
"""This will create a transaction that includes coins that were offered"""
self.log.info(f"Secure-Cancel pending offer with id trade_id {trade_id.hex()}")
trade = await self.trade_store.get_trade_record(trade_id)
if trade is None:
return None
all_coins = trade.removals
for coin in all_coins:
wallet = await self.wallet_state_manager.get_wallet_for_coin(coin.name())
if wallet is None:
continue
new_ph = await wallet.get_new_puzzlehash()
if wallet.type() == WalletType.COLOURED_COIN.value:
tx = await wallet.generate_signed_transaction(
[coin.amount], [new_ph], 0, coins={coin}, ignore_max_send_amount=True
)
else:
tx = await wallet.generate_signed_transaction(
coin.amount, new_ph, 0, coins={coin}, ignore_max_send_amount=True
)
await self.wallet_state_manager.add_pending_transaction(tx_record=tx)
await self.trade_store.set_status(trade_id, TradeStatus.PENDING_CANCEL, False)
return None
async def save_trade(self, trade: TradeRecord):
await self.trade_store.add_trade_record(trade, False)
async def create_offer_for_ids(
self, offer: Dict[int, int], file_name: str
) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
success, trade_offer, error = await self._create_offer_for_ids(offer)
if success is True and trade_offer is not None:
self.write_offer_to_disk(Path(file_name), trade_offer)
await self.save_trade(trade_offer)
return success, trade_offer, error
async def _create_offer_for_ids(self, offer: Dict[int, int]) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
"""
Offer is a dictionary mapping wallet ids to amounts
"""
spend_bundle = None
try:
for id in offer.keys():
amount = offer[id]
wallet_id = uint32(int(id))
wallet = self.wallet_state_manager.wallets[wallet_id]
if isinstance(wallet, CCWallet):
balance = await wallet.get_confirmed_balance()
if balance < abs(amount) and amount < 0:
raise Exception(f"insufficient funds in wallet {wallet_id}")
if amount > 0:
if spend_bundle is None:
to_exclude: List[Coin] = []
else:
to_exclude = spend_bundle.removals()
zero_spend_bundle: SpendBundle = await wallet.generate_zero_val_coin(False, to_exclude)
if spend_bundle is None:
spend_bundle = zero_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, zero_spend_bundle])
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
zero_val_coin: Optional[Coin] = None
for add in additions:
if add not in removals and add.amount == 0:
zero_val_coin = add
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount, zero_val_coin)
else:
new_spend_bundle = await wallet.create_spend_bundle_relative_amount(amount)
elif isinstance(wallet, Wallet):
if spend_bundle is None:
to_exclude = []
else:
to_exclude = spend_bundle.removals()
new_spend_bundle = await wallet.create_spend_bundle_relative_salvia(amount, to_exclude)
else:
return False, None, "unsupported wallet type"
if new_spend_bundle is None or new_spend_bundle.removals() == []:
raise Exception(f"Wallet {id} was unable to create offer.")
if spend_bundle is None:
spend_bundle = new_spend_bundle
else:
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
if spend_bundle is None:
return False, None, None
now = uint64(int(time.time()))
trade_offer: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=None,
created_at_time=now,
my_offer=True,
sent=uint32(0),
spend_bundle=spend_bundle,
tx_spend_bundle=None,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_ACCEPT.value),
sent_to=[],
)
return True, trade_offer, None
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Error with creating trade offer: {type(e)}{tb}")
return False, None, str(e)
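# Illustrative sketch (not part of the original module): `offer` maps wallet ids to
# amounts; judging from the checks above, negative amounts are spent from our own
# wallets and positive amounts request value from the other party. The ids and the
# file name below are hypothetical.
#
#   offer = {1: -1000, 2: 500}  # give 1000 from wallet 1, request 500 of the CC in wallet 2
#   success, trade, error = await trade_manager.create_offer_for_ids(offer, "my_offer.offer")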
def write_offer_to_disk(self, file_path: Path, offer: TradeRecord):
if offer is not None:
file_path.write_text(bytes(offer).hex())
async def get_discrepancies_for_offer(self, file_path: Path) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
self.log.info(f"trade offer: {file_path}")
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(bytes.fromhex(trade_offer_hex))
return get_discrepancies_for_spend_bundle(trade_offer.spend_bundle)
async def get_inner_puzzle_for_puzzle_hash(self, puzzle_hash) -> Program:
info = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash(puzzle_hash)
assert info is not None
puzzle = self.wallet_state_manager.main_wallet.puzzle_for_pk(bytes(info.pubkey))
return puzzle
async def maybe_create_wallets_for_offer(self, file_path: Path) -> bool:
success, result, error = await self.get_discrepancies_for_offer(file_path)
if not success or result is None:
return False
for key, value in result.items():
wsm = self.wallet_state_manager
wallet: Wallet = wsm.main_wallet
if key == "salvia":
continue
self.log.info(f"value is {key}")
exists = await wsm.get_wallet_for_colour(key)
if exists is not None:
continue
await CCWallet.create_wallet_for_cc(wsm, wallet, key)
return True
async def respond_to_offer(self, file_path: Path) -> Tuple[bool, Optional[TradeRecord], Optional[str]]:
has_wallets = await self.maybe_create_wallets_for_offer(file_path)
if not has_wallets:
return False, None, "Unknown Error"
trade_offer = None
try:
trade_offer_hex = file_path.read_text()
trade_offer = TradeRecord.from_bytes(hexstr_to_bytes(trade_offer_hex))
except Exception as e:
return False, None, f"Error: {e}"
if trade_offer is not None:
offer_spend_bundle: SpendBundle = trade_offer.spend_bundle
coinsols: List[CoinSpend] = [] # [] of CoinSpends
cc_coinsol_outamounts: Dict[bytes32, List[Tuple[CoinSpend, int]]] = dict()
aggsig = offer_spend_bundle.aggregated_signature
cc_discrepancies: Dict[bytes32, int] = dict()
salvia_discrepancy = None
wallets: Dict[bytes32, Any] = dict() # colour to wallet dict
for coinsol in offer_spend_bundle.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
colour = bytes(genesis_checker).hex()
if colour not in wallets:
wallets[colour] = await self.wallet_state_manager.get_wallet_for_colour(colour)
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(wallets[colour].id())
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
# Store coinsol and output amount for later
if colour in cc_coinsol_outamounts:
cc_coinsol_outamounts[colour].append((coinsol, total))
else:
cc_coinsol_outamounts[colour] = [(coinsol, total)]
else:
# standard salvia coin
unspent = await self.wallet_state_manager.get_spendable_coins_for_wallet(1)
if coinsol.coin in [record.coin for record in unspent]:
return False, None, "can't respond to own offer"
if salvia_discrepancy is None:
salvia_discrepancy = get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
else:
salvia_discrepancy += get_output_discrepancy_for_puzzle_and_solution(coinsol.coin, puzzle, solution)
coinsols.append(coinsol)
salvia_spend_bundle: Optional[SpendBundle] = None
if salvia_discrepancy is not None:
salvia_spend_bundle = await self.wallet_state_manager.main_wallet.create_spend_bundle_relative_salvia(
salvia_discrepancy, []
)
if salvia_spend_bundle is not None:
for coinsol in coinsols:
salvia_spend_bundle.coin_spends.append(coinsol)
zero_spend_list: List[SpendBundle] = []
spend_bundle = None
# create coloured coin
self.log.info(cc_discrepancies)
for colour in cc_discrepancies.keys():
if cc_discrepancies[colour] < 0:
my_cc_spends = await wallets[colour].select_coins(abs(cc_discrepancies[colour]))
else:
if salvia_spend_bundle is None:
to_exclude: List = []
else:
to_exclude = salvia_spend_bundle.removals()
my_cc_spends = await wallets[colour].select_coins(0)
if my_cc_spends is None or my_cc_spends == set():
zero_spend_bundle: SpendBundle = await wallets[colour].generate_zero_val_coin(False, to_exclude)
if zero_spend_bundle is None:
return (
False,
None,
"Unable to generate zero value coin. Confirm that you have salvia available",
)
zero_spend_list.append(zero_spend_bundle)
additions = zero_spend_bundle.additions()
removals = zero_spend_bundle.removals()
my_cc_spends = set()
for add in additions:
if add not in removals and add.amount == 0:
my_cc_spends.add(add)
if my_cc_spends == set() or my_cc_spends is None:
return False, None, "insufficient funds"
# Create SpendableCC list and innersol_list with both my coins and the offered coins
# Firstly get the output coin
my_output_coin = my_cc_spends.pop()
spendable_cc_list = []
innersol_list = []
genesis_id = genesis_coin_id_for_genesis_coin_checker(Program.from_bytes(bytes.fromhex(colour)))
# Make the rest of the coins assert the output coin is consumed
for coloured_coin in my_cc_spends:
inner_solution = self.wallet_state_manager.main_wallet.make_solution(consumed=[my_output_coin.name()])
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(coloured_coin.puzzle_hash)
assert inner_puzzle is not None
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, coloured_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(coloured_coin)
spendable_cc_list.append(SpendableCC(coloured_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Create SpendableCC for each of the coloured coins received
for cc_coinsol_out in cc_coinsol_outamounts[colour]:
cc_coinsol = cc_coinsol_out[0]
puzzle = Program.from_bytes(bytes(cc_coinsol.puzzle_reveal))
solution = Program.from_bytes(bytes(cc_coinsol.solution))
r = uncurry_cc(puzzle)
if r:
mod_hash, genesis_coin_checker, inner_puzzle = r
inner_solution = solution.first()
lineage_proof = solution.rest().rest().first()
spendable_cc_list.append(SpendableCC(cc_coinsol.coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
# Finish the output coin SpendableCC with new information
newinnerpuzhash = await wallets[colour].get_new_inner_hash()
outputamount = sum([c.amount for c in my_cc_spends]) + cc_discrepancies[colour] + my_output_coin.amount
inner_solution = self.wallet_state_manager.main_wallet.make_solution(
primaries=[{"puzzlehash": newinnerpuzhash, "amount": outputamount}]
)
inner_puzzle = await self.get_inner_puzzle_for_puzzle_hash(my_output_coin.puzzle_hash)
assert inner_puzzle is not None
lineage_proof = await wallets[colour].get_lineage_proof_for_coin(my_output_coin)
spendable_cc_list.append(SpendableCC(my_output_coin, genesis_id, inner_puzzle, lineage_proof))
innersol_list.append(inner_solution)
sigs = await wallets[colour].get_sigs(inner_puzzle, inner_solution, my_output_coin.name())
sigs.append(aggsig)
aggsig = AugSchemeMPL.aggregate(sigs)
if spend_bundle is None:
spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
else:
new_spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
Program.from_bytes(bytes.fromhex(colour)),
spendable_cc_list,
innersol_list,
[aggsig],
)
spend_bundle = SpendBundle.aggregate([spend_bundle, new_spend_bundle])
# reset sigs and aggsig so that they aren't included next time around
sigs = []
aggsig = AugSchemeMPL.aggregate(sigs)
my_tx_records = []
if zero_spend_list is not None and spend_bundle is not None:
zero_spend_list.append(spend_bundle)
spend_bundle = SpendBundle.aggregate(zero_spend_list)
if spend_bundle is None:
return False, None, "spend_bundle missing"
# Add transaction history for this trade
now = uint64(int(time.time()))
if salvia_spend_bundle is not None:
spend_bundle = SpendBundle.aggregate([spend_bundle, salvia_spend_bundle])
if salvia_discrepancy < 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=now,
to_puzzle_hash=token_bytes(),
amount=uint64(abs(salvia_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=salvia_spend_bundle,
additions=salvia_spend_bundle.additions(),
removals=salvia_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=salvia_spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(salvia_discrepancy)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=salvia_spend_bundle,
additions=salvia_spend_bundle.additions(),
removals=salvia_spend_bundle.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=salvia_spend_bundle.name(),
)
my_tx_records.append(tx_record)
for colour, amount in cc_discrepancies.items():
wallet = wallets[colour]
if salvia_discrepancy > 0:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
else:
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(abs(amount)),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet.id(),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.INCOMING_TRADE.value),
name=token_bytes(),
)
my_tx_records.append(tx_record)
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=token_bytes(),
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=uint32(0),
sent_to=[],
trade_id=std_hash(spend_bundle.name() + bytes(now)),
type=uint32(TransactionType.OUTGOING_TRADE.value),
name=spend_bundle.name(),
)
now = uint64(int(time.time()))
trade_record: TradeRecord = TradeRecord(
confirmed_at_index=uint32(0),
accepted_at_time=now,
created_at_time=now,
my_offer=False,
sent=uint32(0),
spend_bundle=offer_spend_bundle,
tx_spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
trade_id=std_hash(spend_bundle.name() + bytes(now)),
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.save_trade(trade_record)
await self.wallet_state_manager.add_pending_transaction(tx_record)
for tx in my_tx_records:
await self.wallet_state_manager.add_transaction(tx)
return True, trade_record, None
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/trade_manager.py
| 0.723212 | 0.233816 |
trade_manager.py
|
pypi
|
from clvm_tools import binutils
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program
from typing import List, Optional, Tuple
from blspy import G1Element
from salvia.types.blockchain_format.coin import Coin
from salvia.types.coin_spend import CoinSpend
from salvia.util.ints import uint64
from salvia.wallet.puzzles.load_clvm import load_clvm
from salvia.types.condition_opcodes import ConditionOpcode
SINGLETON_TOP_LAYER_MOD = load_clvm("singleton_top_layer.clvm")
LAUNCHER_PUZZLE = load_clvm("singleton_launcher.clvm")
DID_INNERPUZ_MOD = load_clvm("did_innerpuz.clvm")
SINGLETON_LAUNCHER = load_clvm("singleton_launcher.clvm")
def create_innerpuz(pubkey: bytes, identities: List[bytes], num_of_backup_ids_needed: uint64) -> Program:
backup_ids_hash = Program(Program.to(identities)).get_tree_hash()
# MOD_HASH MY_PUBKEY RECOVERY_DID_LIST_HASH NUM_VERIFICATIONS_REQUIRED
return DID_INNERPUZ_MOD.curry(pubkey, backup_ids_hash, num_of_backup_ids_needed)
def create_fullpuz(innerpuz: Program, genesis_id: bytes32) -> Program:
mod_hash = SINGLETON_TOP_LAYER_MOD.get_tree_hash()
# singleton_struct = (MOD_HASH . (LAUNCHER_ID . LAUNCHER_PUZZLE_HASH))
singleton_struct = Program.to((mod_hash, (genesis_id, LAUNCHER_PUZZLE.get_tree_hash())))
return SINGLETON_TOP_LAYER_MOD.curry(singleton_struct, innerpuz)
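# Illustrative sketch (assumption, not part of the original module): composing a DID
# puzzle from a wallet pubkey and a list of backup DID ids. `pubkey`, `backup_ids`
# and `launcher_id` are hypothetical placeholders.
#
#   innerpuz = create_innerpuz(pubkey, backup_ids, uint64(len(backup_ids)))
#   full_puzzle = create_fullpuz(innerpuz, launcher_id)
#   did_puzzle_hash = full_puzzle.get_tree_hash()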
def get_pubkey_from_innerpuz(innerpuz: Program) -> G1Element:
ret = uncurry_innerpuz(innerpuz)
if ret is not None:
pubkey_program = ret[0]
else:
raise ValueError("Unable to extract pubkey")
pubkey = G1Element.from_bytes(pubkey_program.as_atom())
return pubkey
def is_did_innerpuz(inner_f: Program):
"""
You may want to generalize this if different `DID_INNERPUZ_MOD` templates are supported.
"""
return inner_f == DID_INNERPUZ_MOD
def is_did_core(inner_f: Program):
return inner_f == SINGLETON_TOP_LAYER_MOD
def uncurry_innerpuz(puzzle: Program) -> Optional[Tuple[Program, Program]]:
"""
Take a puzzle and return `None` if it's not a DID inner puzzle, or
a pair of `pubkey, id_list` (its curried arguments) if it is.
"""
r = puzzle.uncurry()
if r is None:
return r
inner_f, args = r
if not is_did_innerpuz(inner_f):
return None
pubkey, id_list, num_of_backup_ids_needed = list(args.as_iter())
return pubkey, id_list
def get_innerpuzzle_from_puzzle(puzzle: Program) -> Optional[Program]:
r = puzzle.uncurry()
if r is None:
return None
inner_f, args = r
if not is_did_core(inner_f):
return None
SINGLETON_STRUCT, INNER_PUZZLE = list(args.as_iter())
return INNER_PUZZLE
def create_recovery_message_puzzle(recovering_coin_id: bytes32, newpuz: bytes32, pubkey: G1Element):
puzstring = f"(q . ((0x{ConditionOpcode.CREATE_COIN_ANNOUNCEMENT.hex()} 0x{recovering_coin_id.hex()}) (0x{ConditionOpcode.AGG_SIG_UNSAFE.hex()} 0x{bytes(pubkey).hex()} 0x{newpuz.hex()})))" # noqa
puz = binutils.assemble(puzstring)
return Program.to(puz)
def create_spend_for_message(parent_of_message, recovering_coin, newpuz, pubkey):
puzzle = create_recovery_message_puzzle(recovering_coin, newpuz, pubkey)
coin = Coin(parent_of_message, puzzle.get_tree_hash(), uint64(0))
solution = Program.to([])
coinsol = CoinSpend(coin, puzzle, solution)
return coinsol
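# Illustrative sketch (assumption): during DID recovery, each backup identity publishes a
# zero-value message coin whose puzzle announces the recovering coin id and signs off on
# the new puzzle hash; spending it needs only an empty solution. Names are hypothetical.
#
#   message_spend = create_spend_for_message(
#       backup_did_coin.name(), recovering_coin.name(), new_puzhash, backup_pubkey
#   )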
# inspect puzzle and check it is a DID puzzle
def check_is_did_puzzle(puzzle: Program):
r = puzzle.uncurry()
if r is None:
return r
inner_f, args = r
return is_did_core(inner_f)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/did_wallet/did_wallet_puzzles.py
| 0.784154 | 0.228479 |
did_wallet_puzzles.py
|
pypi
|
import hashlib
from typing import Union
from blspy import G1Element, PrivateKey
from clvm.casts import int_from_bytes
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from .load_clvm import load_clvm
from .p2_conditions import puzzle_for_conditions
DEFAULT_HIDDEN_PUZZLE = Program.from_bytes(bytes.fromhex("ff0980"))
DEFAULT_HIDDEN_PUZZLE_HASH = DEFAULT_HIDDEN_PUZZLE.get_tree_hash() # this puzzle `(x)` always fails
MOD = load_clvm("p2_delegated_puzzle_or_hidden_puzzle.clvm")
SYNTHETIC_MOD = load_clvm("calculate_synthetic_public_key.clvm")
PublicKeyProgram = Union[bytes, Program]
GROUP_ORDER = 0x73EDA753299D7D483339D80809A1D80553BDA402FFFE5BFEFFFFFFFF00000001
def calculate_synthetic_offset(public_key: G1Element, hidden_puzzle_hash: bytes32) -> int:
blob = hashlib.sha256(bytes(public_key) + hidden_puzzle_hash).digest()
offset = int_from_bytes(blob)
offset %= GROUP_ORDER
return offset
def calculate_synthetic_public_key(public_key: G1Element, hidden_puzzle_hash: bytes32) -> G1Element:
r = SYNTHETIC_MOD.run([bytes(public_key), hidden_puzzle_hash])
return G1Element.from_bytes(r.as_atom())
def calculate_synthetic_secret_key(secret_key: PrivateKey, hidden_puzzle_hash: bytes32) -> PrivateKey:
secret_exponent = int.from_bytes(bytes(secret_key), "big")
public_key = secret_key.get_g1()
synthetic_offset = calculate_synthetic_offset(public_key, hidden_puzzle_hash)
synthetic_secret_exponent = (secret_exponent + synthetic_offset) % GROUP_ORDER
blob = synthetic_secret_exponent.to_bytes(32, "big")
synthetic_secret_key = PrivateKey.from_bytes(blob)
return synthetic_secret_key
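# Illustrative check (assumption, not part of the original module): the synthetic secret
# key should pair with the synthetic public key computed from the same hidden puzzle hash.
#
#   synthetic_sk = calculate_synthetic_secret_key(sk, DEFAULT_HIDDEN_PUZZLE_HASH)
#   synthetic_pk = calculate_synthetic_public_key(sk.get_g1(), DEFAULT_HIDDEN_PUZZLE_HASH)
#   assert synthetic_sk.get_g1() == synthetic_pk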
def puzzle_for_synthetic_public_key(synthetic_public_key: G1Element) -> Program:
return MOD.curry(bytes(synthetic_public_key))
def puzzle_for_public_key_and_hidden_puzzle_hash(public_key: G1Element, hidden_puzzle_hash: bytes32) -> Program:
synthetic_public_key = calculate_synthetic_public_key(public_key, hidden_puzzle_hash)
return puzzle_for_synthetic_public_key(synthetic_public_key)
def puzzle_for_public_key_and_hidden_puzzle(public_key: G1Element, hidden_puzzle: Program) -> Program:
return puzzle_for_public_key_and_hidden_puzzle_hash(public_key, hidden_puzzle.get_tree_hash())
def puzzle_for_pk(public_key: G1Element) -> Program:
return puzzle_for_public_key_and_hidden_puzzle_hash(public_key, DEFAULT_HIDDEN_PUZZLE_HASH)
def solution_for_delegated_puzzle(delegated_puzzle: Program, solution: Program) -> Program:
return Program.to([[], delegated_puzzle, solution])
def solution_for_hidden_puzzle(
hidden_public_key: G1Element,
hidden_puzzle: Program,
solution_to_hidden_puzzle: Program,
) -> Program:
return Program.to([hidden_public_key, hidden_puzzle, solution_to_hidden_puzzle])
def solution_for_conditions(conditions) -> Program:
delegated_puzzle = puzzle_for_conditions(conditions)
return solution_for_delegated_puzzle(delegated_puzzle, Program.to(0))
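# Illustrative sketch (assumption, not part of the original module): the common flow for
# the standard transaction puzzle. `public_key` and `conditions` are hypothetical.
#
#   puzzle = puzzle_for_pk(public_key)              # curried on the synthetic public key
#   solution = solution_for_conditions(conditions)  # delegated-puzzle spend path
#   # signatures for this path are made with
#   # calculate_synthetic_secret_key(secret_key, DEFAULT_HIDDEN_PUZZLE_HASH)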
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/puzzles/p2_delegated_puzzle_or_hidden_puzzle.py
| 0.819424 | 0.288272 |
p2_delegated_puzzle_or_hidden_puzzle.py
|
pypi
|
from typing import List, Tuple, Optional
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.coin_spend import CoinSpend
from salvia.wallet.puzzles.load_clvm import load_clvm
from salvia.wallet.lineage_proof import LineageProof
from salvia.util.ints import uint64
from salvia.util.hash import std_hash
SINGLETON_MOD = load_clvm("singleton_top_layer.clvm")
SINGLETON_MOD_HASH = SINGLETON_MOD.get_tree_hash()
P2_SINGLETON_MOD = load_clvm("p2_singleton.clvm")
P2_SINGLETON_OR_DELAYED_MOD = load_clvm("p2_singleton_or_delayed_puzhash.clvm")
SINGLETON_LAUNCHER = load_clvm("singleton_launcher.clvm")
SINGLETON_LAUNCHER_HASH = SINGLETON_LAUNCHER.get_tree_hash()
ESCAPE_VALUE = -113
MELT_CONDITION = [ConditionOpcode.CREATE_COIN, 0, ESCAPE_VALUE]
# Given the parent and amount of the launcher coin, return the launcher coin
def generate_launcher_coin(coin: Coin, amount: uint64) -> Coin:
return Coin(coin.name(), SINGLETON_LAUNCHER_HASH, amount)
# Wrap inner puzzles that are not singleton specific to strip away "truths"
def adapt_inner_to_singleton(inner_puzzle: Program) -> Program:
# (a (q . inner_puzzle) (r 1))
return Program.to([2, (1, inner_puzzle), [6, 1]])
# Take standard coin and amount -> launch conditions & launcher coin solution
def launch_conditions_and_coinsol(
coin: Coin,
inner_puzzle: Program,
comment: List[Tuple[str, str]],
amount: uint64,
) -> Tuple[List[Program], CoinSpend]:
if (amount % 2) == 0:
raise ValueError("Coin amount cannot be even. Subtract one seed.")
launcher_coin: Coin = generate_launcher_coin(coin, amount)
curried_singleton: Program = SINGLETON_MOD.curry(
(SINGLETON_MOD_HASH, (launcher_coin.name(), SINGLETON_LAUNCHER_HASH)),
inner_puzzle,
)
launcher_solution = Program.to(
[
curried_singleton.get_tree_hash(),
amount,
comment,
]
)
create_launcher = Program.to(
[
ConditionOpcode.CREATE_COIN,
SINGLETON_LAUNCHER_HASH,
amount,
],
)
assert_launcher_announcement = Program.to(
[
ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT,
std_hash(launcher_coin.name() + launcher_solution.get_tree_hash()),
],
)
conditions = [create_launcher, assert_launcher_announcement]
launcher_coin_spend = CoinSpend(
launcher_coin,
SINGLETON_LAUNCHER,
launcher_solution,
)
return conditions, launcher_coin_spend
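# Illustrative sketch (assumption, not part of the original module): launching a
# singleton from an ordinary coin. `standard_coin` and `adapted_inner` are hypothetical;
# the odd-amount requirement is enforced above.
#
#   conditions, launcher_spend = launch_conditions_and_coinsol(
#       standard_coin, adapted_inner, [("comment", "example")], uint64(1)
#   )
#   # `conditions` belong in the standard coin's own spend; `launcher_spend` is bundled
#   # into the same SpendBundle so the asserted launcher announcement can be satisfied.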
# Take a coin solution, return a lineage proof for their child to use in spends
def lineage_proof_for_coinsol(coin_spend: CoinSpend) -> LineageProof:
parent_name: bytes32 = coin_spend.coin.parent_coin_info
inner_puzzle_hash: Optional[bytes32] = None
if coin_spend.coin.puzzle_hash != SINGLETON_LAUNCHER_HASH:
full_puzzle = Program.from_bytes(bytes(coin_spend.puzzle_reveal))
r = full_puzzle.uncurry()
if r is not None:
_, args = r
_, inner_puzzle = list(args.as_iter())
inner_puzzle_hash = inner_puzzle.get_tree_hash()
amount: uint64 = coin_spend.coin.amount
return LineageProof(
parent_name,
inner_puzzle_hash,
amount,
)
# Return the puzzle reveal of a singleton with specific ID and innerpuz
def puzzle_for_singleton(launcher_id: bytes32, inner_puz: Program) -> Program:
return SINGLETON_MOD.curry(
(SINGLETON_MOD_HASH, (launcher_id, SINGLETON_LAUNCHER_HASH)),
inner_puz,
)
# Return a solution to spend a singleton
def solution_for_singleton(
lineage_proof: LineageProof,
amount: uint64,
inner_solution: Program,
) -> Program:
if lineage_proof.inner_puzzle_hash is None:
parent_info = [
lineage_proof.parent_name,
lineage_proof.amount,
]
else:
parent_info = [
lineage_proof.parent_name,
lineage_proof.inner_puzzle_hash,
lineage_proof.amount,
]
return Program.to([parent_info, amount, inner_solution])
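# Illustrative sketch (assumption): the eve spend's lineage proof carries no inner puzzle
# hash, while later spends include it, matching the two branches above. Names are hypothetical.
#
#   eve_solution = solution_for_singleton(LineageProof(launcher_id, None, uint64(1)), uint64(1), inner_sol)
#   later_solution = solution_for_singleton(LineageProof(parent_id, parent_inner_ph, uint64(1)), uint64(1), inner_sol)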
# Create a coin that a singleton can claim
def pay_to_singleton_puzzle(launcher_id: bytes32) -> Program:
return P2_SINGLETON_MOD.curry(SINGLETON_MOD_HASH, launcher_id, SINGLETON_LAUNCHER_HASH)
# Create a coin that a singleton can claim or that can be sent to another puzzle after a specified time
def pay_to_singleton_or_delay_puzzle(launcher_id: bytes32, delay_time: uint64, delay_ph: bytes32) -> Program:
return P2_SINGLETON_OR_DELAYED_MOD.curry(
SINGLETON_MOD_HASH,
launcher_id,
SINGLETON_LAUNCHER_HASH,
delay_time,
delay_ph,
)
# Solution for EITHER p2_singleton or the claiming spend case for p2_singleton_or_delayed_puzhash
def solution_for_p2_singleton(p2_singleton_coin: Coin, singleton_inner_puzhash: bytes32) -> Program:
return Program.to([singleton_inner_puzhash, p2_singleton_coin.name()])
# Solution for the delayed spend case for p2_singleton_or_delayed_puzhash
def solution_for_p2_delayed_puzzle(output_amount: uint64) -> Program:
return Program.to([output_amount, []])
# Get announcement conditions for singleton solution and full CoinSpend for the claimed coin
def claim_p2_singleton(
p2_singleton_coin: Coin,
singleton_inner_puzhash: bytes32,
launcher_id: bytes32,
delay_time: Optional[uint64] = None,
delay_ph: Optional[bytes32] = None,
) -> Tuple[Program, Program, CoinSpend]:
assertion = Program.to([ConditionOpcode.ASSERT_COIN_ANNOUNCEMENT, std_hash(p2_singleton_coin.name() + b"$")])
announcement = Program.to([ConditionOpcode.CREATE_PUZZLE_ANNOUNCEMENT, p2_singleton_coin.name()])
if delay_time is None or delay_ph is None:
puzzle: Program = pay_to_singleton_puzzle(launcher_id)
else:
puzzle = pay_to_singleton_or_delay_puzzle(
launcher_id,
delay_time,
delay_ph,
)
claim_coinsol = CoinSpend(
p2_singleton_coin,
puzzle,
solution_for_p2_singleton(p2_singleton_coin, singleton_inner_puzhash),
)
return assertion, announcement, claim_coinsol
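# Illustrative sketch (assumption, not part of the original module): claiming a
# p2_singleton coin. Both returned conditions appear intended for the singleton's inner
# solution, while `claim_coinsol` spends the claimed coin in the same SpendBundle.
#
#   assertion, announcement, claim_coinsol = claim_p2_singleton(
#       p2_coin, singleton_inner_puzzle.get_tree_hash(), launcher_id
#   )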
# Get the CoinSpend for spending to a delayed puzzle
def spend_to_delayed_puzzle(
p2_singleton_coin: Coin,
output_amount: uint64,
launcher_id: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> CoinSpend:
claim_coinsol = CoinSpend(
p2_singleton_coin,
pay_to_singleton_or_delay_puzzle(launcher_id, delay_time, delay_ph),
solution_for_p2_delayed_puzzle(output_amount),
)
return claim_coinsol
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/puzzles/singleton_top_layer.py
| 0.856392 | 0.339034 |
singleton_top_layer.py
|
pypi
|
from typing import Any, Dict
from salvia.wallet.key_val_store import KeyValStore
from salvia.wallet.settings.default_settings import default_settings
from salvia.wallet.settings.settings_objects import BackupInitialized
class UserSettings:
settings: Dict[str, Any]
basic_store: KeyValStore
@staticmethod
async def create(
store: KeyValStore,
name: str = None,
):
self = UserSettings()
self.basic_store = store
self.settings = {}
await self.load_store()
return self
def _keys(self):
all_keys = [BackupInitialized]
return all_keys
async def load_store(self):
keys = self._keys()
for setting in keys:
name = setting.__name__
object = await self.basic_store.get_object(name, BackupInitialized)
if object is None:
object = default_settings[name]
assert object is not None
self.settings[name] = object
async def setting_updated(self, setting: Any):
name = setting.__class__.__name__
await self.basic_store.set_object(name, setting)
self.settings[name] = setting
async def user_skipped_backup_import(self):
new = BackupInitialized(
user_initialized=True,
user_skipped=True,
backup_info_imported=False,
new_wallet=False,
)
await self.setting_updated(new)
return new
async def user_imported_backup(self):
new = BackupInitialized(
user_initialized=True,
user_skipped=False,
backup_info_imported=True,
new_wallet=False,
)
await self.setting_updated(new)
return new
async def user_created_new_wallet(self):
new = BackupInitialized(
user_initialized=True,
user_skipped=False,
backup_info_imported=False,
new_wallet=True,
)
await self.setting_updated(new)
return new
def get_backup_settings(self) -> BackupInitialized:
return self.settings[BackupInitialized.__name__]
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/settings/user_settings.py
| 0.623033 | 0.170992 |
user_settings.py
|
pypi
|
import asyncio
import json
import time
from dataclasses import dataclass
from secrets import token_bytes
from typing import Any, List, Optional, Tuple
from blspy import AugSchemeMPL, G1Element, PrivateKey
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_spend import CoinSpend
from salvia.types.spend_bundle import SpendBundle
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.streamable import Streamable, streamable
from salvia.wallet.derivation_record import DerivationRecord
from salvia.wallet.derive_keys import master_sk_to_wallet_sk
from salvia.wallet.rl_wallet.rl_wallet_puzzles import (
make_clawback_solution,
rl_make_aggregation_puzzle,
rl_make_aggregation_solution,
rl_make_solution_mode_2,
rl_puzzle_for_pk,
solution_for_rl,
)
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.transaction_type import TransactionType
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet import Wallet
from salvia.wallet.wallet_coin_record import WalletCoinRecord
from salvia.wallet.wallet_info import WalletInfo
@dataclass(frozen=True)
@streamable
class RLInfo(Streamable):
type: str
admin_pubkey: Optional[bytes]
user_pubkey: Optional[bytes]
limit: Optional[uint64]
interval: Optional[uint64]
rl_origin: Optional[Coin]
rl_origin_id: Optional[bytes32]
rl_puzzle_hash: Optional[bytes32]
initialized: bool
class RLWallet:
wallet_state_manager: Any
wallet_info: WalletInfo
rl_coin_record: Optional[WalletCoinRecord]
rl_info: RLInfo
main_wallet: Wallet
private_key: PrivateKey
@staticmethod
async def create_rl_admin(
wallet_state_manager: Any,
):
unused: Optional[uint32] = await wallet_state_manager.puzzle_store.get_unused_derivation_path()
if unused is None:
await wallet_state_manager.create_more_puzzle_hashes()
unused = await wallet_state_manager.puzzle_store.get_unused_derivation_path()
assert unused is not None
private_key = master_sk_to_wallet_sk(wallet_state_manager.private_key, unused)
pubkey: G1Element = private_key.get_g1()
rl_info = RLInfo("admin", bytes(pubkey), None, None, None, None, None, None, False)
info_as_string = json.dumps(rl_info.to_json_dict())
wallet_info: Optional[WalletInfo] = await wallet_state_manager.user_store.create_wallet(
"RL Admin", WalletType.RATE_LIMITED, info_as_string
)
if wallet_info is None:
raise Exception("wallet_info is None")
await wallet_state_manager.puzzle_store.add_derivation_paths(
[
DerivationRecord(
unused,
bytes32(token_bytes(32)),
pubkey,
WalletType.RATE_LIMITED,
wallet_info.id,
)
]
)
await wallet_state_manager.puzzle_store.set_used_up_to(unused)
self = await RLWallet.create(wallet_state_manager, wallet_info)
await wallet_state_manager.add_new_wallet(self, self.id())
return self
@staticmethod
async def create_rl_user(
wallet_state_manager: Any,
):
async with wallet_state_manager.puzzle_store.lock:
unused: Optional[uint32] = await wallet_state_manager.puzzle_store.get_unused_derivation_path()
if unused is None:
await wallet_state_manager.create_more_puzzle_hashes()
unused = await wallet_state_manager.puzzle_store.get_unused_derivation_path()
assert unused is not None
private_key = wallet_state_manager.private_key
pubkey: G1Element = master_sk_to_wallet_sk(private_key, unused).get_g1()
rl_info = RLInfo("user", None, bytes(pubkey), None, None, None, None, None, False)
info_as_string = json.dumps(rl_info.to_json_dict())
await wallet_state_manager.user_store.create_wallet("RL User", WalletType.RATE_LIMITED, info_as_string)
wallet_info = await wallet_state_manager.user_store.get_last_wallet()
if wallet_info is None:
raise Exception("wallet_info is None")
self = await RLWallet.create(wallet_state_manager, wallet_info)
await wallet_state_manager.puzzle_store.add_derivation_paths(
[
DerivationRecord(
unused,
bytes32(token_bytes(32)),
pubkey,
WalletType.RATE_LIMITED,
wallet_info.id,
)
]
)
await wallet_state_manager.puzzle_store.set_used_up_to(unused)
await wallet_state_manager.add_new_wallet(self, self.id())
return self
@staticmethod
async def create(wallet_state_manager: Any, info: WalletInfo):
self = RLWallet()
self.private_key = wallet_state_manager.private_key
self.wallet_state_manager = wallet_state_manager
self.wallet_info = info
self.rl_info = RLInfo.from_json_dict(json.loads(info.data))
self.main_wallet = wallet_state_manager.main_wallet
return self
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.RATE_LIMITED)
def id(self) -> uint32:
return self.wallet_info.id
async def admin_create_coin(
self,
interval: uint64,
limit: uint64,
user_pubkey: str,
amount: uint64,
fee: uint64,
) -> bool:
coins = await self.wallet_state_manager.main_wallet.select_coins(amount)
if coins is None:
return False
origin = coins.copy().pop()
origin_id = origin.name()
user_pubkey_bytes = hexstr_to_bytes(user_pubkey)
assert self.rl_info.admin_pubkey is not None
rl_puzzle = rl_puzzle_for_pk(
pubkey=user_pubkey_bytes,
rate_amount=limit,
interval_time=interval,
origin_id=origin_id,
clawback_pk=self.rl_info.admin_pubkey,
)
rl_puzzle_hash = rl_puzzle.get_tree_hash()
index = await self.wallet_state_manager.puzzle_store.index_for_pubkey(
G1Element.from_bytes(self.rl_info.admin_pubkey)
)
assert index is not None
record = DerivationRecord(
index,
rl_puzzle_hash,
G1Element.from_bytes(self.rl_info.admin_pubkey),
WalletType.RATE_LIMITED,
self.id(),
)
await self.wallet_state_manager.puzzle_store.add_derivation_paths([record])
spend_bundle = await self.main_wallet.generate_signed_transaction(amount, rl_puzzle_hash, fee, origin_id, coins)
if spend_bundle is None:
return False
await self.main_wallet.push_transaction(spend_bundle)
new_rl_info = RLInfo(
"admin",
self.rl_info.admin_pubkey,
user_pubkey_bytes,
limit,
interval,
origin,
origin.name(),
rl_puzzle_hash,
True,
)
data_str = json.dumps(new_rl_info.to_json_dict())
new_wallet_info = WalletInfo(self.id(), self.wallet_info.name, self.type(), data_str)
await self.wallet_state_manager.user_store.update_wallet(new_wallet_info, False)
await self.wallet_state_manager.add_new_wallet(self, self.id())
self.wallet_info = new_wallet_info
self.rl_info = new_rl_info
return True
async def set_user_info(
self,
interval: uint64,
limit: uint64,
origin_parent_id: str,
origin_puzzle_hash: str,
origin_amount: uint64,
admin_pubkey: str,
) -> None:
admin_pubkey_bytes = hexstr_to_bytes(admin_pubkey)
assert self.rl_info.user_pubkey is not None
origin = Coin(
hexstr_to_bytes(origin_parent_id),
hexstr_to_bytes(origin_puzzle_hash),
origin_amount,
)
rl_puzzle = rl_puzzle_for_pk(
pubkey=self.rl_info.user_pubkey,
rate_amount=limit,
interval_time=interval,
origin_id=origin.name(),
clawback_pk=admin_pubkey_bytes,
)
rl_puzzle_hash = rl_puzzle.get_tree_hash()
new_rl_info = RLInfo(
"user",
admin_pubkey_bytes,
self.rl_info.user_pubkey,
limit,
interval,
origin,
origin.name(),
rl_puzzle_hash,
True,
)
rl_puzzle_hash = rl_puzzle.get_tree_hash()
if await self.wallet_state_manager.puzzle_store.puzzle_hash_exists(rl_puzzle_hash):
raise ValueError(
"Cannot create multiple Rate Limited wallets under the same keys. This will change in a future release."
)
user_pubkey: G1Element = G1Element.from_bytes(self.rl_info.user_pubkey)
index = await self.wallet_state_manager.puzzle_store.index_for_pubkey(user_pubkey)
assert index is not None
record = DerivationRecord(
index,
rl_puzzle_hash,
user_pubkey,
WalletType.RATE_LIMITED,
self.id(),
)
aggregation_puzzlehash = self.rl_get_aggregation_puzzlehash(new_rl_info.rl_puzzle_hash)
record2 = DerivationRecord(
index + 1,
aggregation_puzzlehash,
user_pubkey,
WalletType.RATE_LIMITED,
self.id(),
)
await self.wallet_state_manager.puzzle_store.add_derivation_paths([record, record2])
self.wallet_state_manager.set_coin_with_puzzlehash_created_callback(
aggregation_puzzlehash, self.aggregate_this_coin
)
data_str = json.dumps(new_rl_info.to_json_dict())
new_wallet_info = WalletInfo(self.id(), self.wallet_info.name, self.type(), data_str)
await self.wallet_state_manager.user_store.update_wallet(new_wallet_info, False)
await self.wallet_state_manager.add_new_wallet(self, self.id())
self.wallet_info = new_wallet_info
self.rl_info = new_rl_info
async def aggregate_this_coin(self, coin: Coin):
spend_bundle = await self.rl_generate_signed_aggregation_transaction(
self.rl_info, coin, await self._get_rl_parent(), await self._get_rl_coin()
)
rl_coin = await self._get_rl_coin()
puzzle_hash = rl_coin.puzzle_hash if rl_coin is not None else None
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=puzzle_hash,
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
asyncio.create_task(self.push_transaction(tx_record))
async def rl_available_balance(self) -> uint64:
self.rl_coin_record = await self._get_rl_coin_record()
if self.rl_coin_record is None:
return uint64(0)
peak = self.wallet_state_manager.blockchain.get_peak()
height = peak.height if peak else 0
assert self.rl_info.limit is not None
unlocked = int(
((height - self.rl_coin_record.confirmed_block_height) / self.rl_info.interval) * int(self.rl_info.limit)
)
total_amount = self.rl_coin_record.coin.amount
available_amount = min(unlocked, total_amount)
return uint64(available_amount)
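# Illustrative arithmetic (assumption, not part of the original module): with a limit of
# 100 seeds per 10-block interval, an RL coin confirmed at height 1000 and a peak at
# height 1050 unlocks ((1050 - 1000) / 10) * 100 = 500 seeds, capped at the coin amount.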
async def get_confirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_confirmed_balance_for_wallet(self.id(), unspent_records)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
return await self.wallet_state_manager.get_unconfirmed_balance(self.id(), unspent_records)
async def get_spendable_balance(self, unspent_records=None) -> uint128:
spendable_am = await self.wallet_state_manager.get_confirmed_spendable_balance_for_wallet(self.id())
return spendable_am
async def get_max_send_amount(self, records=None):
# Rate limited wallet is a singleton, max send is same as spendable
return await self.get_spendable_balance()
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
continue
our_spend = False
for coin in record.removals:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
def get_new_puzzle(self) -> Program:
if (
self.rl_info.limit is None
or self.rl_info.interval is None
or self.rl_info.user_pubkey is None
or self.rl_info.admin_pubkey is None
or self.rl_info.rl_origin_id is None
):
raise ValueError("One or more of the RL info fields is None")
return rl_puzzle_for_pk(
pubkey=self.rl_info.user_pubkey,
rate_amount=self.rl_info.limit,
interval_time=self.rl_info.interval,
origin_id=self.rl_info.rl_origin_id,
clawback_pk=self.rl_info.admin_pubkey,
)
def get_new_puzzlehash(self) -> bytes32:
return self.get_new_puzzle().get_tree_hash()
async def can_generate_rl_puzzle_hash(self, hash) -> bool:
return await self.wallet_state_manager.puzzle_store.puzzle_hash_exists(hash)
def puzzle_for_pk(self, pk) -> Optional[Program]:
if self.rl_info.initialized is False:
return None
if (
self.rl_info.limit is None
or self.rl_info.interval is None
or self.rl_info.user_pubkey is None
or self.rl_info.admin_pubkey is None
or self.rl_info.rl_origin_id is None
):
return None
return rl_puzzle_for_pk(
pubkey=self.rl_info.user_pubkey,
rate_amount=self.rl_info.limit,
interval_time=self.rl_info.interval,
origin_id=self.rl_info.rl_origin_id,
clawback_pk=self.rl_info.admin_pubkey,
)
async def get_keys(self, puzzle_hash: bytes32) -> Tuple[G1Element, PrivateKey]:
"""
Returns keys for puzzle_hash.
"""
index_for_puzzlehash = await self.wallet_state_manager.puzzle_store.index_for_puzzle_hash_and_wallet(
puzzle_hash, self.id()
)
if index_for_puzzlehash is None:
raise ValueError(f"index_for_puzzlehash is None ph {puzzle_hash}")
private = master_sk_to_wallet_sk(self.private_key, index_for_puzzlehash)
pubkey = private.get_g1()
return pubkey, private
async def get_keys_pk(self, clawback_pubkey: bytes):
"""
Return keys for pubkey
"""
index_for_pubkey = await self.wallet_state_manager.puzzle_store.index_for_pubkey(
G1Element.from_bytes(clawback_pubkey)
)
if index_for_pubkey is None:
raise ValueError(f"index_for_pubkey is None pk {clawback_pubkey.hex()}")
private = master_sk_to_wallet_sk(self.private_key, index_for_pubkey)
pubkey = private.get_g1()
return pubkey, private
async def _get_rl_coin(self) -> Optional[Coin]:
rl_coins = await self.wallet_state_manager.coin_store.get_coin_records_by_puzzle_hash(
self.rl_info.rl_puzzle_hash
)
for coin_record in rl_coins:
if coin_record.spent is False:
return coin_record.coin
return None
async def _get_rl_coin_record(self) -> Optional[WalletCoinRecord]:
rl_coins = await self.wallet_state_manager.coin_store.get_coin_records_by_puzzle_hash(
self.rl_info.rl_puzzle_hash
)
for coin_record in rl_coins:
if coin_record.spent is False:
return coin_record
return None
async def _get_rl_parent(self) -> Optional[Coin]:
self.rl_coin_record = await self._get_rl_coin_record()
if not self.rl_coin_record:
return None
rl_parent_id = self.rl_coin_record.coin.parent_coin_info
if rl_parent_id == self.rl_info.rl_origin_id:
return self.rl_info.rl_origin
rl_parent = await self.wallet_state_manager.coin_store.get_coin_record(rl_parent_id)
if rl_parent is None:
return None
return rl_parent.coin
async def rl_generate_unsigned_transaction(self, to_puzzlehash, amount, fee) -> List[CoinSpend]:
spends = []
assert self.rl_coin_record is not None
coin = self.rl_coin_record.coin
puzzle_hash = coin.puzzle_hash
pubkey = self.rl_info.user_pubkey
rl_parent: Optional[Coin] = await self._get_rl_parent()
if rl_parent is None:
raise ValueError("No RL parent coin")
# these lines make mypy happy
assert pubkey is not None
assert self.rl_info.limit is not None
assert self.rl_info.interval is not None
assert self.rl_info.rl_origin_id is not None
assert self.rl_info.admin_pubkey is not None
puzzle = rl_puzzle_for_pk(
bytes(pubkey),
self.rl_info.limit,
self.rl_info.interval,
self.rl_info.rl_origin_id,
self.rl_info.admin_pubkey,
)
solution = solution_for_rl(
coin.parent_coin_info,
puzzle_hash,
coin.amount,
to_puzzlehash,
amount,
rl_parent.parent_coin_info,
rl_parent.amount,
self.rl_info.interval,
self.rl_info.limit,
fee,
)
spends.append(CoinSpend(coin, puzzle, solution))
return spends
async def generate_signed_transaction(self, amount, to_puzzle_hash, fee: uint64 = uint64(0)) -> TransactionRecord:
self.rl_coin_record = await self._get_rl_coin_record()
if not self.rl_coin_record:
raise ValueError("No unspent coin (zero balance)")
if amount > self.rl_coin_record.coin.amount:
raise ValueError(f"Coin value not sufficient: {amount} > {self.rl_coin_record.coin.amount}")
transaction = await self.rl_generate_unsigned_transaction(to_puzzle_hash, amount, fee)
spend_bundle = await self.rl_sign_transaction(transaction)
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=to_puzzle_hash,
amount=uint64(amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def rl_sign_transaction(self, spends: List[CoinSpend]) -> SpendBundle:
sigs = []
for coin_spend in spends:
pubkey, secretkey = await self.get_keys(coin_spend.coin.puzzle_hash)
signature = AugSchemeMPL.sign(secretkey, coin_spend.solution.get_tree_hash())
sigs.append(signature)
aggsig = AugSchemeMPL.aggregate(sigs)
return SpendBundle(spends, aggsig)
def generate_unsigned_clawback_transaction(self, clawback_coin: Coin, clawback_puzzle_hash: bytes32, fee):
if (
self.rl_info.limit is None
or self.rl_info.interval is None
or self.rl_info.user_pubkey is None
or self.rl_info.admin_pubkey is None
):
raise ValueError("One ore more of the elements of rl_info is None")
spends = []
coin = clawback_coin
if self.rl_info.rl_origin is None:
raise ValueError("Origin not initialized")
puzzle = rl_puzzle_for_pk(
self.rl_info.user_pubkey,
self.rl_info.limit,
self.rl_info.interval,
self.rl_info.rl_origin.name(),
self.rl_info.admin_pubkey,
)
solution = make_clawback_solution(clawback_puzzle_hash, clawback_coin.amount, fee)
spends.append((puzzle, CoinSpend(coin, puzzle, solution)))
return spends
async def sign_clawback_transaction(self, spends: List[Tuple[Program, CoinSpend]], clawback_pubkey) -> SpendBundle:
sigs = []
for puzzle, coin_spend in spends:
pubkey, secretkey = await self.get_keys_pk(clawback_pubkey)
signature = AugSchemeMPL.sign(secretkey, coin_spend.solution.get_tree_hash())
sigs.append(signature)
aggsig = AugSchemeMPL.aggregate(sigs)
solution_list = []
for puzzle, coin_spend in spends:
solution_list.append(coin_spend)
return SpendBundle(solution_list, aggsig)
async def clawback_rl_coin(self, clawback_puzzle_hash: bytes32, fee) -> SpendBundle:
rl_coin = await self._get_rl_coin()
if rl_coin is None:
raise ValueError("rl_coin is None")
transaction = self.generate_unsigned_clawback_transaction(rl_coin, clawback_puzzle_hash, fee)
return await self.sign_clawback_transaction(transaction, self.rl_info.admin_pubkey)
async def clawback_rl_coin_transaction(self, fee) -> TransactionRecord:
to_puzzle_hash = await self.main_wallet.get_new_puzzlehash()
spend_bundle = await self.clawback_rl_coin(to_puzzle_hash, fee)
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=to_puzzle_hash,
amount=uint64(0),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
# This is for using the AC locked coin and aggregating it into wallet - must happen in same block as RL Mode 2
async def rl_generate_signed_aggregation_transaction(self, rl_info, consolidating_coin, rl_parent, rl_coin):
if (
rl_info.limit is None
or rl_info.interval is None
or rl_info.user_pubkey is None
or rl_info.admin_pubkey is None
):
raise ValueError("One or more of the elements of rl_info is None")
if self.rl_coin_record is None:
raise ValueError("Rl coin record is None")
list_of_coin_spends = []
self.rl_coin_record = await self._get_rl_coin_record()
pubkey, secretkey = await self.get_keys(self.rl_coin_record.coin.puzzle_hash)
# Spend wallet coin
puzzle = rl_puzzle_for_pk(
rl_info.user_pubkey,
rl_info.limit,
rl_info.interval,
rl_info.rl_origin_id,
rl_info.admin_pubkey,
)
solution = rl_make_solution_mode_2(
rl_coin.puzzle_hash,
consolidating_coin.parent_coin_info,
consolidating_coin.puzzle_hash,
consolidating_coin.amount,
rl_coin.parent_coin_info,
rl_coin.amount,
rl_parent.amount,
rl_parent.parent_coin_info,
)
signature = AugSchemeMPL.sign(secretkey, solution.get_tree_hash())
rl_spend = CoinSpend(self.rl_coin_record.coin, puzzle, solution)
list_of_coin_spends.append(rl_spend)
# Spend consolidating coin
puzzle = rl_make_aggregation_puzzle(self.rl_coin_record.coin.puzzle_hash)
solution = rl_make_aggregation_solution(
consolidating_coin.name(),
self.rl_coin_record.coin.parent_coin_info,
self.rl_coin_record.coin.amount,
)
agg_spend = CoinSpend(consolidating_coin, puzzle, solution)
list_of_coin_spends.append(agg_spend)
aggsig = AugSchemeMPL.aggregate([signature])
return SpendBundle(list_of_coin_spends, aggsig)
def rl_get_aggregation_puzzlehash(self, wallet_puzzle):
puzzle_hash = rl_make_aggregation_puzzle(wallet_puzzle).get_tree_hash()
return puzzle_hash
async def rl_add_funds(self, amount, puzzle_hash, fee):
spend_bundle = await self.main_wallet.generate_signed_transaction(amount, puzzle_hash, fee)
if spend_bundle is None:
return False
await self.main_wallet.push_transaction(spend_bundle)
async def push_transaction(self, tx: TransactionRecord) -> None:
"""Use this API to send transactions."""
await self.wallet_state_manager.add_pending_transaction(tx)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/rl_wallet/rl_wallet.py
| 0.782953 | 0.258935 |
rl_wallet.py
|
pypi
|
import math
from binascii import hexlify
from clvm_tools import binutils
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.util.ints import uint64
from salvia.wallet.salvialisp import sexp
from salvia.wallet.puzzles.load_clvm import load_clvm
RATE_LIMITED_MODE = 1
AGGREGATION_MODE = 2
CLAWBACK_MODE = 3
def rl_puzzle_for_pk(
pubkey: bytes,
rate_amount: uint64,
interval_time: uint64,
origin_id: bytes32,
clawback_pk: bytes,
):
"""
Solution to this puzzle must be in format:
(1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount,
min_block_time, parent_parent_id, parent_amount, fee)
RATE LIMIT LOGIC:
M - salvia_per_interval
N - interval_blocks
V - amount being spent
MIN_BLOCK_AGE = V / (M / N)
if not (min_block_age * M >= V * N) do X (raise)
ASSERT_COIN_BLOCK_AGE_EXCEEDS min_block_age
"""
MOD = load_clvm("rl.clvm")
return MOD.curry(pubkey, rate_amount, interval_time, origin_id, clawback_pk)
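# Illustrative arithmetic (assumption, not part of the original module): with
# rate_amount M = 100, interval_time N = 10 and a spend of V = 30, the puzzle requires
# min_block_age * M >= V * N, i.e. a coin age of at least 3 blocks, which matches
# math.ceil(V * N / M) as computed in solution_for_rl below.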
def rl_make_aggregation_solution(myid, wallet_coin_primary_input, wallet_coin_amount):
opcode_myid = "0x" + hexlify(myid).decode("ascii")
primary_input = "0x" + hexlify(wallet_coin_primary_input).decode("ascii")
sol = sexp(opcode_myid, primary_input, wallet_coin_amount)
return Program.to(binutils.assemble(sol))
def make_clawback_solution(puzzlehash, amount, fee):
opcode_create = hexlify(ConditionOpcode.CREATE_COIN).decode("ascii")
solution = sexp(CLAWBACK_MODE, sexp("0x" + opcode_create, "0x" + str(puzzlehash), amount - fee))
return Program.to(binutils.assemble(solution))
def rl_make_solution_mode_2(
my_puzzle_hash,
consolidating_primary_input,
consolidating_coin_puzzle_hash,
outgoing_amount,
my_primary_input,
incoming_amount,
parent_amount,
my_parent_parent_id,
):
my_puzzle_hash = hexlify(my_puzzle_hash).decode("ascii")
consolidating_primary_input = hexlify(consolidating_primary_input).decode("ascii")
consolidating_coin_puzzle_hash = hexlify(consolidating_coin_puzzle_hash).decode("ascii")
primary_input = hexlify(my_primary_input).decode("ascii")
sol = sexp(
AGGREGATION_MODE,
"0x" + my_puzzle_hash,
"0x" + consolidating_primary_input,
"0x" + consolidating_coin_puzzle_hash,
outgoing_amount,
"0x" + primary_input,
incoming_amount,
parent_amount,
"0x" + str(my_parent_parent_id),
)
return Program.to(binutils.assemble(sol))
def solution_for_rl(
my_parent_id: bytes32,
my_puzzlehash: bytes32,
my_amount: uint64,
out_puzzlehash: bytes32,
out_amount: uint64,
my_parent_parent_id: bytes32,
parent_amount: uint64,
interval,
limit,
fee,
):
"""
Solution is (1 my_parent_id, my_puzzlehash, my_amount, outgoing_puzzle_hash, outgoing_amount,
min_block_time, parent_parent_id, parent_amount, fee)
min_block_count = math.ceil((out_amount * interval) / limit)
"""
min_block_count = math.ceil((out_amount * interval) / limit)
solution = sexp(
RATE_LIMITED_MODE,
"0x" + my_parent_id.hex(),
"0x" + my_puzzlehash.hex(),
my_amount,
"0x" + out_puzzlehash.hex(),
out_amount,
min_block_count,
"0x" + my_parent_parent_id.hex(),
parent_amount,
fee,
)
return Program.to(binutils.assemble(solution))
def rl_make_aggregation_puzzle(wallet_puzzle):
"""
If Wallet A wants to send further funds to Wallet B, it can lock them up using this puzzle.
Solution will be (my_id wallet_coin_primary_input wallet_coin_amount)
"""
MOD = load_clvm("rl_aggregation.clvm")
return MOD.curry(wallet_puzzle)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/rl_wallet/rl_wallet_puzzles.py
| 0.521715 | 0.231755 |
rl_wallet_puzzles.py
|
pypi
|
from typing import Dict, Optional, Tuple
from salvia.types.blockchain_format.program import Program, INFINITE_COST
from salvia.types.condition_opcodes import ConditionOpcode
from salvia.types.spend_bundle import SpendBundle
from salvia.util.condition_tools import conditions_dict_for_solution
from salvia.wallet.cc_wallet import cc_utils
from salvia.wallet.trade_record import TradeRecord
from salvia.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
if status is TradeStatus.PENDING_CONFIRM:
return "Pending Confirmation"
elif status is TradeStatus.CANCELED:
return "Canceled"
elif status is TradeStatus.CONFIRMED:
return "Confirmed"
elif status is TradeStatus.PENDING_CANCEL:
return "Pending Cancellation"
elif status is TradeStatus.FAILED:
return "Failed"
elif status is TradeStatus.PENDING_ACCEPT:
return "Pending"
def trade_record_to_dict(record: TradeRecord) -> Dict:
"""Convenience function to return only part of trade record we care about and show correct status to the ui"""
result = {}
result["trade_id"] = record.trade_id.hex()
result["sent"] = record.sent
result["my_offer"] = record.my_offer
result["created_at_time"] = record.created_at_time
result["accepted_at_time"] = record.accepted_at_time
result["confirmed_at_index"] = record.confirmed_at_index
result["status"] = trade_status_ui_string(TradeStatus(record.status))
success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
if success is False or offer_dict is None:
raise ValueError(error)
result["offer_dict"] = offer_dict
return result
# Returns the difference between a coin's amount and the amount output by its puzzle and solution
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
return discrepancy
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
total = 0
if conditions:
        for cvp in conditions.get(ConditionOpcode.CREATE_COIN, []):
            total += Program.to(cvp.vars[1]).as_int()
return total
def get_discrepancies_for_spend_bundle(
trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
try:
cc_discrepancies: Dict[str, int] = dict()
for coinsol in trade_offer.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
colour = bytes(genesis_checker).hex()
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
else:
coin_amount = coinsol.coin.amount
out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
diff = coin_amount - out_amount
if "salvia" in cc_discrepancies:
cc_discrepancies["salvia"] = cc_discrepancies["salvia"] + diff
else:
cc_discrepancies["salvia"] = diff
return True, cc_discrepancies, None
except Exception as e:
return False, None, e
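# --- Illustrative note (not part of the original module) -----------------------
# The dict built above maps "salvia" (for uncoloured coins) or a colour hex string (for
# coloured coins) to the sum of `coin.amount - output_amount` over the spends of that type.
# A positive entry therefore means the bundle consumes more of that colour than it
# recreates, and a negative entry means it creates more than it consumes. For example, a
# spend of a 100-unit uncoloured coin that creates no outputs contributes +100 to "salvia",
# while a coloured spend whose inner solution creates 100 more units than the coin held
# contributes -100 to that colour, giving something like {"salvia": 100, "<colour hex>": -100}.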
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/util/trade_utils.py
| 0.829043 | 0.383468 |
trade_utils.py
|
pypi
|
import base64
import json
from typing import Any
import aiohttp
from blspy import AugSchemeMPL, PrivateKey, PublicKeyMPL, SignatureMPL
from cryptography.fernet import Fernet
from salvia.server.server import ssl_context_for_root
from salvia.ssl.create_ssl import get_mozilla_ca_crt
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.hash import std_hash
from salvia.wallet.derive_keys import master_sk_to_backup_sk
from salvia.wallet.util.wallet_types import WalletType
def open_backup_file(file_path, private_key):
    """Verify the backup file's signature and decrypt its payload with the Fernet key derived from the
    backup secret key (a sketch of the writing side follows this function)."""
    backup_file_text = file_path.read_text()
backup_file_json = json.loads(backup_file_text)
meta_data = backup_file_json["meta_data"]
meta_data_bytes = json.dumps(meta_data).encode()
sig = backup_file_json["signature"]
    backup_sk = master_sk_to_backup_sk(private_key)
    my_pubkey = backup_sk.get_g1()
    key_base_64 = base64.b64encode(bytes(backup_sk))
f = Fernet(key_base_64)
encrypted_data = backup_file_json["data"].encode()
msg = std_hash(encrypted_data) + std_hash(meta_data_bytes)
signature = SignatureMPL.from_bytes(hexstr_to_bytes(sig))
pubkey = PublicKeyMPL.from_bytes(hexstr_to_bytes(meta_data["pubkey"]))
sig_match_my = AugSchemeMPL.verify(my_pubkey, msg, signature)
sig_match_backup = AugSchemeMPL.verify(pubkey, msg, signature)
assert sig_match_my is True
assert sig_match_backup is True
data_bytes = f.decrypt(encrypted_data)
data_text = data_bytes.decode()
data_json = json.loads(data_text)
unencrypted = {}
unencrypted["data"] = data_json
unencrypted["meta_data"] = meta_data
return unencrypted
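# --- Illustrative sketch (not part of the original module) ---------------------
# Hedged sketch of the writing side implied by open_backup_file() above: encrypt the payload
# with a Fernet key derived from the backup secret key, and sign
# std_hash(encrypted_data) + std_hash(meta_data_bytes) with that same key. The field names
# mirror what open_backup_file() reads; the helper name is hypothetical and the real wallet's
# backup writer may include additional metadata.
def _example_build_backup_json(private_key, data: dict, extra_meta: dict) -> str:
    backup_sk = master_sk_to_backup_sk(private_key)
    key_base_64 = base64.b64encode(bytes(backup_sk))
    encrypted_data = Fernet(key_base_64).encrypt(json.dumps(data).encode())
    meta_data = {**extra_meta, "pubkey": bytes(backup_sk.get_g1()).hex()}
    meta_data_bytes = json.dumps(meta_data).encode()
    msg = std_hash(encrypted_data) + std_hash(meta_data_bytes)
    signature = AugSchemeMPL.sign(backup_sk, msg)
    backup_file_json = {
        "data": encrypted_data.decode(),
        "meta_data": meta_data,
        "signature": bytes(signature).hex(),
    }
    return json.dumps(backup_file_json)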
def get_backup_info(file_path, private_key):
json_dict = open_backup_file(file_path, private_key)
data = json_dict["data"]
wallet_list_json = data["wallet_list"]
info_dict = {}
wallets = []
for wallet_info in wallet_list_json:
wallet = {}
wallet["name"] = wallet_info["name"]
wallet["type"] = wallet_info["type"]
wallet["type_name"] = WalletType(wallet_info["type"]).name
wallet["id"] = wallet_info["id"]
wallet["data"] = wallet_info["data"]
wallets.append(wallet)
info_dict["version"] = data["version"]
info_dict["fingerprint"] = data["fingerprint"]
info_dict["timestamp"] = data["timestamp"]
info_dict["wallets"] = wallets
return info_dict
async def post(session: aiohttp.ClientSession, url: str, data: Any):
mozilla_root = get_mozilla_ca_crt()
ssl_context = ssl_context_for_root(mozilla_root)
response = await session.post(url, json=data, ssl=ssl_context)
return await response.json()
async def get(session: aiohttp.ClientSession, url: str):
response = await session.get(url)
return await response.text()
async def upload_backup(host: str, backup_text: str):
request = {"backup": backup_text}
session = aiohttp.ClientSession()
nonce_url = f"{host}/upload_backup"
upload_response = await post(session, nonce_url, request)
await session.close()
return upload_response
async def download_backup(host: str, private_key: PrivateKey):
session = aiohttp.ClientSession()
try:
backup_privkey = master_sk_to_backup_sk(private_key)
backup_pubkey = bytes(backup_privkey.get_g1()).hex()
# Get nonce
nonce_request = {"pubkey": backup_pubkey}
nonce_url = f"{host}/get_download_nonce"
nonce_response = await post(session, nonce_url, nonce_request)
nonce = nonce_response["nonce"]
# Sign nonce
signature = bytes(AugSchemeMPL.sign(backup_privkey, std_hash(hexstr_to_bytes(nonce)))).hex()
# Request backup url
get_backup_url = f"{host}/download_backup"
backup_request = {"pubkey": backup_pubkey, "signature": signature}
backup_response = await post(session, get_backup_url, backup_request)
if backup_response["success"] is False:
raise ValueError("No backup on backup service")
# Download from s3
backup_url = backup_response["url"]
backup_text = await get(session, backup_url)
await session.close()
return backup_text
except Exception as e:
await session.close()
# Pass exception
raise e
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/util/backup_utils.py
| 0.512205 | 0.173498 |
backup_utils.py
|
pypi
|
from typing import List, Optional
import aiosqlite
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.mempool_inclusion_status import MempoolInclusionStatus
from salvia.util.db_wrapper import DBWrapper
from salvia.util.errors import Err
from salvia.util.ints import uint8, uint32
from salvia.wallet.trade_record import TradeRecord
from salvia.wallet.trading.trade_status import TradeStatus
class TradeStore:
"""
TradeStore stores trading history.
"""
db_connection: aiosqlite.Connection
cache_size: uint32
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper, cache_size: uint32 = uint32(600000)):
self = cls()
self.cache_size = cache_size
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
(
"CREATE TABLE IF NOT EXISTS trade_records("
" trade_record blob,"
" trade_id text PRIMARY KEY,"
" status int,"
" confirmed_at_index int,"
" created_at_time bigint,"
" sent int)"
)
)
await self.db_connection.execute(
"CREATE INDEX IF NOT EXISTS trade_confirmed_index on trade_records(confirmed_at_index)"
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_status on trade_records(status)")
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS trade_id on trade_records(trade_id)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM trade_records")
await cursor.close()
await self.db_connection.commit()
async def add_trade_record(self, record: TradeRecord, in_transaction) -> None:
"""
Store TradeRecord into DB
"""
if not in_transaction:
await self.db_wrapper.lock.acquire()
try:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO trade_records VALUES(?, ?, ?, ?, ?, ?)",
(
bytes(record),
record.trade_id.hex(),
record.status,
record.confirmed_at_index,
record.created_at_time,
record.sent,
),
)
await cursor.close()
finally:
if not in_transaction:
await self.db_connection.commit()
self.db_wrapper.lock.release()
async def set_status(self, trade_id: bytes32, status: TradeStatus, in_transaction: bool, index: uint32 = uint32(0)):
"""
Updates the status of the trade
"""
current: Optional[TradeRecord] = await self.get_trade_record(trade_id)
if current is None:
return None
confirmed_at_index = current.confirmed_at_index
if index != 0:
confirmed_at_index = index
tx: TradeRecord = TradeRecord(
confirmed_at_index=confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=current.sent,
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(status.value),
sent_to=current.sent_to,
)
await self.add_trade_record(tx, in_transaction)
async def increment_sent(
self,
id: bytes32,
name: str,
send_status: MempoolInclusionStatus,
err: Optional[Err],
) -> bool:
"""
Updates trade sent count (Full Node has received spend_bundle and sent ack).
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return False
sent_to = current.sent_to.copy()
err_str = err.name if err is not None else None
append_data = (name, uint8(send_status.value), err_str)
# Don't increment count if it's already sent to this peer
if append_data in sent_to:
return False
sent_to.append(append_data)
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(current.sent + 1),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=current.status,
sent_to=sent_to,
)
await self.add_trade_record(tx, False)
return True
async def set_not_sent(self, id: bytes32):
"""
Updates trade sent count to 0.
"""
current: Optional[TradeRecord] = await self.get_trade_record(id)
if current is None:
return None
tx: TradeRecord = TradeRecord(
confirmed_at_index=current.confirmed_at_index,
accepted_at_time=current.accepted_at_time,
created_at_time=current.created_at_time,
my_offer=current.my_offer,
sent=uint32(0),
spend_bundle=current.spend_bundle,
tx_spend_bundle=current.tx_spend_bundle,
additions=current.additions,
removals=current.removals,
trade_id=current.trade_id,
status=uint32(TradeStatus.PENDING_CONFIRM.value),
sent_to=[],
)
await self.add_trade_record(tx, False)
async def get_trade_record(self, trade_id: bytes32) -> Optional[TradeRecord]:
"""
        Checks the DB for a TradeRecord with the given trade_id and returns it.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE trade_id=?", (trade_id.hex(),))
row = await cursor.fetchone()
await cursor.close()
if row is not None:
record = TradeRecord.from_bytes(row[0])
return record
return None
async def get_trade_record_with_status(self, status: TradeStatus) -> List[TradeRecord]:
"""
Checks DB for TradeRecord with id: id and returns it.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE status=?", (status.value,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_not_sent(self) -> List[TradeRecord]:
"""
Returns the list of trades that have not been received by full node yet.
"""
        # confirmed_at_index == 0 means the trade has not been confirmed yet (there is no
        # separate "confirmed" column in the trade_records schema above)
        cursor = await self.db_connection.execute(
            "SELECT * from trade_records WHERE sent<? and confirmed_at_index=?",
            (
                4,
                0,
            ),
        )
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_unconfirmed(self) -> List[TradeRecord]:
"""
Returns the list of all trades that have not yet been confirmed.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed=?", (0,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_all_trades(self) -> List[TradeRecord]:
"""
Returns all stored trades.
"""
cursor = await self.db_connection.execute("SELECT * from trade_records")
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def get_trades_above(self, height: uint32) -> List[TradeRecord]:
cursor = await self.db_connection.execute("SELECT * from trade_records WHERE confirmed_at_index>?", (height,))
rows = await cursor.fetchall()
await cursor.close()
records = []
for row in rows:
record = TradeRecord.from_bytes(row[0])
records.append(record)
return records
async def rollback_to_block(self, block_index):
# Delete from storage
cursor = await self.db_connection.execute(
"DELETE FROM trade_records WHERE confirmed_at_index>?", (block_index,)
)
await cursor.close()
await self.db_connection.commit()
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/trading/trade_store.py
| 0.76947 | 0.180504 |
trade_store.py
|
pypi
|
from __future__ import annotations
import logging
import time
from dataclasses import replace
from secrets import token_bytes
from typing import Any, Dict, List, Optional, Set
from blspy import AugSchemeMPL, G2Element
from salvia.consensus.cost_calculator import calculate_cost_of_program, NPCResult
from salvia.full_node.bundle_tools import simple_solution_generator
from salvia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from salvia.protocols.wallet_protocol import PuzzleSolutionResponse
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.coin_spend import CoinSpend
from salvia.types.generator_types import BlockGenerator
from salvia.types.spend_bundle import SpendBundle
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.condition_tools import conditions_dict_for_solution, pkm_pairs_for_conditions_dict
from salvia.util.ints import uint8, uint32, uint64, uint128
from salvia.util.json_util import dict_to_json_str
from salvia.wallet.block_record import HeaderBlockRecord
from salvia.wallet.cc_wallet.cc_info import CCInfo
from salvia.wallet.cc_wallet.cc_utils import (
CC_MOD,
SpendableCC,
cc_puzzle_for_inner_puzzle,
cc_puzzle_hash_for_inner_puzzle_hash,
get_lineage_proof_from_coin_and_puz,
spend_bundle_for_spendable_ccs,
uncurry_cc,
)
from salvia.wallet.derivation_record import DerivationRecord
from salvia.wallet.puzzles.genesis_by_coin_id_with_0 import (
create_genesis_or_zero_coin_checker,
genesis_coin_id_for_genesis_coin_checker,
lineage_proof_for_genesis,
)
from salvia.wallet.puzzles.p2_delegated_puzzle_or_hidden_puzzle import (
DEFAULT_HIDDEN_PUZZLE_HASH,
calculate_synthetic_secret_key,
)
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.transaction_type import TransactionType
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet import Wallet
from salvia.wallet.wallet_coin_record import WalletCoinRecord
from salvia.wallet.wallet_info import WalletInfo
class CCWallet:
wallet_state_manager: Any
log: logging.Logger
wallet_info: WalletInfo
cc_coin_record: WalletCoinRecord
cc_info: CCInfo
standard_wallet: Wallet
base_puzzle_program: Optional[bytes]
base_inner_puzzle_hash: Optional[bytes32]
cost_of_single_tx: Optional[int]
@staticmethod
async def create_new_cc(
wallet_state_manager: Any,
wallet: Wallet,
amount: uint64,
):
self = CCWallet()
self.cost_of_single_tx = None
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
self.standard_wallet = wallet
self.log = logging.getLogger(__name__)
std_wallet_id = self.standard_wallet.wallet_id
bal = await wallet_state_manager.get_confirmed_balance_for_wallet(std_wallet_id, None)
if amount > bal:
raise ValueError("Not enough balance")
self.wallet_state_manager = wallet_state_manager
self.cc_info = CCInfo(None, [])
info_as_string = bytes(self.cc_info).hex()
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"CC Wallet", WalletType.COLOURED_COIN, info_as_string
)
if self.wallet_info is None:
raise ValueError("Internal Error")
try:
spend_bundle = await self.generate_new_coloured_coin(amount)
except Exception:
await wallet_state_manager.user_store.delete_wallet(self.id())
raise
if spend_bundle is None:
await wallet_state_manager.user_store.delete_wallet(self.id())
raise ValueError("Failed to create spend.")
await self.wallet_state_manager.add_new_wallet(self, self.id())
# Change and actual coloured coin
non_ephemeral_spends: List[Coin] = spend_bundle.not_ephemeral_additions()
cc_coin = None
puzzle_store = self.wallet_state_manager.puzzle_store
for c in non_ephemeral_spends:
info = await puzzle_store.wallet_info_for_puzzle_hash(c.puzzle_hash)
if info is None:
raise ValueError("Internal Error")
id, wallet_type = info
if id == self.id():
cc_coin = c
if cc_coin is None:
raise ValueError("Internal Error, unable to generate new coloured coin")
regular_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_coin.puzzle_hash,
amount=uint64(cc_coin.amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.wallet_state_manager.main_wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=token_bytes(),
)
cc_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_coin.puzzle_hash,
amount=uint64(cc_coin.amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=None,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=token_bytes(),
)
await self.standard_wallet.push_transaction(regular_record)
await self.standard_wallet.push_transaction(cc_record)
return self
@staticmethod
async def create_wallet_for_cc(
wallet_state_manager: Any,
wallet: Wallet,
genesis_checker_hex: str,
) -> CCWallet:
self = CCWallet()
self.cost_of_single_tx = None
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
self.standard_wallet = wallet
self.log = logging.getLogger(__name__)
self.wallet_state_manager = wallet_state_manager
self.cc_info = CCInfo(Program.from_bytes(bytes.fromhex(genesis_checker_hex)), [])
info_as_string = bytes(self.cc_info).hex()
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"CC Wallet", WalletType.COLOURED_COIN, info_as_string
)
if self.wallet_info is None:
raise Exception("wallet_info is None")
await self.wallet_state_manager.add_new_wallet(self, self.id())
return self
@staticmethod
async def create(
wallet_state_manager: Any,
wallet: Wallet,
wallet_info: WalletInfo,
) -> CCWallet:
self = CCWallet()
self.log = logging.getLogger(__name__)
self.cost_of_single_tx = None
self.wallet_state_manager = wallet_state_manager
self.wallet_info = wallet_info
self.standard_wallet = wallet
self.cc_info = CCInfo.from_bytes(hexstr_to_bytes(self.wallet_info.data))
self.base_puzzle_program = None
self.base_inner_puzzle_hash = None
return self
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.COLOURED_COIN)
def id(self) -> uint32:
return self.wallet_info.id
async def get_confirmed_balance(self, record_list: Optional[Set[WalletCoinRecord]] = None) -> uint64:
if record_list is None:
record_list = await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.id())
amount: uint64 = uint64(0)
for record in record_list:
lineage = await self.get_lineage_proof_for_coin(record.coin)
if lineage is not None:
amount = uint64(amount + record.coin.amount)
self.log.info(f"Confirmed balance for cc wallet {self.id()} is {amount}")
return uint64(amount)
async def get_unconfirmed_balance(self, unspent_records=None) -> uint128:
confirmed = await self.get_confirmed_balance(unspent_records)
unconfirmed_tx: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
self.id()
)
addition_amount = 0
removal_amount = 0
for record in unconfirmed_tx:
if TransactionType(record.type) is TransactionType.INCOMING_TX:
addition_amount += record.amount
else:
removal_amount += record.amount
result = confirmed - removal_amount + addition_amount
self.log.info(f"Unconfirmed balance for cc wallet {self.id()} is {result}")
return uint128(result)
async def get_max_send_amount(self, records=None):
spendable: List[WalletCoinRecord] = list(
await self.wallet_state_manager.get_spendable_coins_for_wallet(self.id(), records)
)
if len(spendable) == 0:
return 0
spendable.sort(reverse=True, key=lambda record: record.coin.amount)
if self.cost_of_single_tx is None:
coin = spendable[0].coin
tx = await self.generate_signed_transaction(
[coin.amount], [coin.puzzle_hash], coins={coin}, ignore_max_send_amount=True
)
program: BlockGenerator = simple_solution_generator(tx.spend_bundle)
# npc contains names of the coins removed, puzzle_hashes and their spend conditions
result: NPCResult = get_name_puzzle_conditions(
program,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
cost_per_byte=self.wallet_state_manager.constants.COST_PER_BYTE,
safe_mode=True,
)
cost_result: uint64 = calculate_cost_of_program(
program.program, result, self.wallet_state_manager.constants.COST_PER_BYTE
)
self.cost_of_single_tx = cost_result
self.log.info(f"Cost of a single tx for standard wallet: {self.cost_of_single_tx}")
        max_cost = self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM / 2  # avoid full block TXs; see note after this method
current_cost = 0
total_amount = 0
total_coin_count = 0
for record in spendable:
current_cost += self.cost_of_single_tx
total_amount += record.coin.amount
total_coin_count += 1
if current_cost + self.cost_of_single_tx > max_cost:
break
return total_amount
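    # --- Illustrative note (not part of the original module) -----------------------
    # The loop above keeps adding coins while the running CLVM cost stays under half of
    # MAX_BLOCK_COST_CLVM, so the number of coins aggregated into one transaction is roughly
    # (MAX_BLOCK_COST_CLVM / 2) // cost_of_single_tx. With purely hypothetical numbers
    # (a per-spend cost of 11_000_000 against an 11_000_000_000 block cost limit) that would
    # allow about 500 coins per transaction.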
async def get_name(self):
return self.wallet_info.name
async def set_name(self, new_name: str):
new_info = replace(self.wallet_info, name=new_name)
self.wallet_info = new_info
await self.wallet_state_manager.user_store.update_wallet(self.wallet_info, False)
def get_colour(self) -> str:
assert self.cc_info.my_genesis_checker is not None
return bytes(self.cc_info.my_genesis_checker).hex()
async def coin_added(self, coin: Coin, height: uint32):
"""Notification from wallet state manager that wallet has been received."""
self.log.info(f"CC wallet has been notified that {coin} was added")
search_for_parent: bool = True
inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
lineage_proof = Program.to((1, [coin.parent_coin_info, inner_puzzle.get_tree_hash(), coin.amount]))
await self.add_lineage(coin.name(), lineage_proof, True)
for name, lineage_proofs in self.cc_info.lineage_proofs:
if coin.parent_coin_info == name:
search_for_parent = False
break
if search_for_parent:
data: Dict[str, Any] = {
"data": {
"action_data": {
"api_name": "request_puzzle_solution",
"height": height,
"coin_name": coin.parent_coin_info,
"received_coin": coin.name(),
}
}
}
data_str = dict_to_json_str(data)
await self.wallet_state_manager.create_action(
name="request_puzzle_solution",
wallet_id=self.id(),
wallet_type=self.type(),
callback="puzzle_solution_received",
done=False,
data=data_str,
in_transaction=True,
)
async def puzzle_solution_received(self, response: PuzzleSolutionResponse, action_id: int):
coin_name = response.coin_name
height = response.height
puzzle: Program = response.puzzle
r = uncurry_cc(puzzle)
header_hash = self.wallet_state_manager.blockchain.height_to_hash(height)
block: Optional[
HeaderBlockRecord
] = await self.wallet_state_manager.blockchain.block_store.get_header_block_record(header_hash)
if block is None:
return None
removals = block.removals
if r is not None:
mod_hash, genesis_coin_checker, inner_puzzle = r
self.log.info(f"parent: {coin_name} inner_puzzle for parent is {inner_puzzle}")
parent_coin = None
for coin in removals:
if coin.name() == coin_name:
parent_coin = coin
if parent_coin is None:
raise ValueError("Error in finding parent")
lineage_proof = get_lineage_proof_from_coin_and_puz(parent_coin, puzzle)
await self.add_lineage(coin_name, lineage_proof)
await self.wallet_state_manager.action_store.action_done(action_id)
async def get_new_inner_hash(self) -> bytes32:
return await self.standard_wallet.get_new_puzzlehash()
async def get_new_inner_puzzle(self) -> Program:
return await self.standard_wallet.get_new_puzzle()
async def get_puzzle_hash(self, new: bool):
return await self.standard_wallet.get_puzzle_hash(new)
async def get_new_puzzlehash(self) -> bytes32:
return await self.standard_wallet.get_new_puzzlehash()
def puzzle_for_pk(self, pubkey) -> Program:
inner_puzzle = self.standard_wallet.puzzle_for_pk(bytes(pubkey))
cc_puzzle: Program = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, inner_puzzle)
self.base_puzzle_program = bytes(cc_puzzle)
self.base_inner_puzzle_hash = inner_puzzle.get_tree_hash()
return cc_puzzle
async def get_new_cc_puzzle_hash(self):
return (await self.wallet_state_manager.get_unused_derivation_record(self.id())).puzzle_hash
# Create a new coin of value 0 with a given colour
async def generate_zero_val_coin(self, send=True, exclude: List[Coin] = None) -> SpendBundle:
if self.cc_info.my_genesis_checker is None:
raise ValueError("My genesis checker is None")
if exclude is None:
exclude = []
coins = await self.standard_wallet.select_coins(0, exclude)
assert coins != set()
origin = coins.copy().pop()
origin_id = origin.name()
cc_inner = await self.get_new_inner_hash()
cc_puzzle_hash: Program = cc_puzzle_hash_for_inner_puzzle_hash(
CC_MOD, self.cc_info.my_genesis_checker, cc_inner
)
tx: TransactionRecord = await self.standard_wallet.generate_signed_transaction(
uint64(0), cc_puzzle_hash, uint64(0), origin_id, coins
)
assert tx.spend_bundle is not None
full_spend: SpendBundle = tx.spend_bundle
self.log.info(f"Generate zero val coin: cc_puzzle_hash is {cc_puzzle_hash}")
        # generate the eve coin so we can add future lineage_proofs even if we don't spend the eve coin
eve_coin = Coin(origin_id, cc_puzzle_hash, uint64(0))
await self.add_lineage(
eve_coin.name(),
Program.to(
(
1,
[eve_coin.parent_coin_info, cc_inner, eve_coin.amount],
)
),
)
await self.add_lineage(eve_coin.parent_coin_info, Program.to((0, [origin.as_list(), 1])))
if send:
regular_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_puzzle_hash,
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(10),
spend_bundle=full_spend,
additions=full_spend.additions(),
removals=full_spend.removals(),
wallet_id=uint32(1),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=token_bytes(),
)
cc_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=cc_puzzle_hash,
amount=uint64(0),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=full_spend,
additions=full_spend.additions(),
removals=full_spend.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.INCOMING_TX.value),
name=full_spend.name(),
)
await self.wallet_state_manager.add_transaction(regular_record)
await self.wallet_state_manager.add_pending_transaction(cc_record)
return full_spend
async def get_spendable_balance(self, records=None) -> uint64:
coins = await self.get_cc_spendable_coins(records)
amount = 0
for record in coins:
amount += record.coin.amount
return uint64(amount)
async def get_pending_change_balance(self) -> uint64:
unconfirmed_tx = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.id())
addition_amount = 0
for record in unconfirmed_tx:
if not record.is_in_mempool():
continue
our_spend = False
for coin in record.removals:
# Don't count eve spend as change
if coin.parent_coin_info.hex() == self.get_colour():
continue
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
our_spend = True
break
if our_spend is not True:
continue
for coin in record.additions:
if await self.wallet_state_manager.does_coin_belong_to_wallet(coin, self.id()):
addition_amount += coin.amount
return uint64(addition_amount)
async def get_cc_spendable_coins(self, records=None) -> List[WalletCoinRecord]:
result: List[WalletCoinRecord] = []
record_list: Set[WalletCoinRecord] = await self.wallet_state_manager.get_spendable_coins_for_wallet(
self.id(), records
)
for record in record_list:
lineage = await self.get_lineage_proof_for_coin(record.coin)
if lineage is not None:
result.append(record)
return result
async def select_coins(self, amount: uint64) -> Set[Coin]:
"""
Returns a set of coins that can be used for generating a new transaction.
Note: Must be called under wallet state manager lock
"""
spendable_am = await self.get_confirmed_balance()
if amount > spendable_am:
error_msg = f"Can't select amount higher than our spendable balance {amount}, spendable {spendable_am}"
self.log.warning(error_msg)
raise ValueError(error_msg)
self.log.info(f"About to select coins for amount {amount}")
spendable: List[WalletCoinRecord] = await self.get_cc_spendable_coins()
sum = 0
used_coins: Set = set()
# Use older coins first
spendable.sort(key=lambda r: r.confirmed_block_height)
        # Try to use coins from the store; if there aren't enough "unused"
        # coins, use change coins that are not confirmed yet
unconfirmed_removals: Dict[bytes32, Coin] = await self.wallet_state_manager.unconfirmed_removals_for_wallet(
self.id()
)
for coinrecord in spendable:
if sum >= amount and len(used_coins) > 0:
break
if coinrecord.coin.name() in unconfirmed_removals:
continue
sum += coinrecord.coin.amount
used_coins.add(coinrecord.coin)
self.log.info(f"Selected coin: {coinrecord.coin.name()} at height {coinrecord.confirmed_block_height}!")
# This happens when we couldn't use one of the coins because it's already used
# but unconfirmed, and we are waiting for the change. (unconfirmed_additions)
if sum < amount:
raise ValueError(
"Can't make this transaction at the moment. Waiting for the change from the previous transaction."
)
self.log.info(f"Successfully selected coins: {used_coins}")
return used_coins
async def get_sigs(self, innerpuz: Program, innersol: Program, coin_name: bytes32) -> List[G2Element]:
puzzle_hash = innerpuz.get_tree_hash()
pubkey, private = await self.wallet_state_manager.get_keys(puzzle_hash)
synthetic_secret_key = calculate_synthetic_secret_key(private, DEFAULT_HIDDEN_PUZZLE_HASH)
sigs: List[G2Element] = []
error, conditions, cost = conditions_dict_for_solution(
innerpuz, innersol, self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM
)
if conditions is not None:
for _, msg in pkm_pairs_for_conditions_dict(
conditions, coin_name, self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA
):
signature = AugSchemeMPL.sign(synthetic_secret_key, msg)
sigs.append(signature)
return sigs
async def inner_puzzle_for_cc_puzhash(self, cc_hash: bytes32) -> Program:
record: DerivationRecord = await self.wallet_state_manager.puzzle_store.get_derivation_record_for_puzzle_hash(
cc_hash
)
inner_puzzle: Program = self.standard_wallet.puzzle_for_pk(bytes(record.pubkey))
return inner_puzzle
async def get_lineage_proof_for_coin(self, coin) -> Optional[Program]:
for name, proof in self.cc_info.lineage_proofs:
if name == coin.parent_coin_info:
return proof
return None
async def generate_signed_transaction(
self,
amounts: List[uint64],
puzzle_hashes: List[bytes32],
fee: uint64 = uint64(0),
origin_id: bytes32 = None,
coins: Set[Coin] = None,
ignore_max_send_amount: bool = False,
) -> TransactionRecord:
# Get coins and calculate amount of change required
outgoing_amount = uint64(sum(amounts))
total_outgoing = outgoing_amount + fee
if not ignore_max_send_amount:
max_send = await self.get_max_send_amount()
if total_outgoing > max_send:
raise ValueError(f"Can't send more than {max_send} in a single transaction")
if coins is None:
selected_coins: Set[Coin] = await self.select_coins(uint64(total_outgoing))
else:
selected_coins = coins
total_amount = sum([x.amount for x in selected_coins])
change = total_amount - total_outgoing
primaries = []
for amount, puzzle_hash in zip(amounts, puzzle_hashes):
primaries.append({"puzzlehash": puzzle_hash, "amount": amount})
if change > 0:
changepuzzlehash = await self.get_new_inner_hash()
primaries.append({"puzzlehash": changepuzzlehash, "amount": change})
coin = list(selected_coins)[0]
inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
if self.cc_info.my_genesis_checker is None:
raise ValueError("My genesis checker is None")
genesis_id = genesis_coin_id_for_genesis_coin_checker(self.cc_info.my_genesis_checker)
spendable_cc_list = []
innersol_list = []
sigs: List[G2Element] = []
first = True
for coin in selected_coins:
coin_inner_puzzle = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
if first:
first = False
if fee > 0:
innersol = self.standard_wallet.make_solution(primaries=primaries, fee=fee)
else:
innersol = self.standard_wallet.make_solution(primaries=primaries)
else:
innersol = self.standard_wallet.make_solution()
innersol_list.append(innersol)
lineage_proof = await self.get_lineage_proof_for_coin(coin)
assert lineage_proof is not None
spendable_cc_list.append(SpendableCC(coin, genesis_id, inner_puzzle, lineage_proof))
sigs = sigs + await self.get_sigs(coin_inner_puzzle, innersol, coin.name())
spend_bundle = spend_bundle_for_spendable_ccs(
CC_MOD,
self.cc_info.my_genesis_checker,
spendable_cc_list,
innersol_list,
sigs,
)
# TODO add support for array in stored records
return TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=puzzle_hashes[0],
amount=uint64(outgoing_amount),
fee_amount=uint64(0),
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
async def add_lineage(self, name: bytes32, lineage: Optional[Program], in_transaction=False):
self.log.info(f"Adding parent {name}: {lineage}")
current_list = self.cc_info.lineage_proofs.copy()
current_list.append((name, lineage))
cc_info: CCInfo = CCInfo(self.cc_info.my_genesis_checker, current_list)
await self.save_info(cc_info, in_transaction)
async def save_info(self, cc_info: CCInfo, in_transaction):
self.cc_info = cc_info
current_info = self.wallet_info
data_str = bytes(cc_info).hex()
wallet_info = WalletInfo(current_info.id, current_info.name, current_info.type, data_str)
self.wallet_info = wallet_info
await self.wallet_state_manager.user_store.update_wallet(wallet_info, in_transaction)
async def generate_new_coloured_coin(self, amount: uint64) -> SpendBundle:
coins = await self.standard_wallet.select_coins(amount)
origin = coins.copy().pop()
origin_id = origin.name()
cc_inner_hash = await self.get_new_inner_hash()
await self.add_lineage(origin_id, Program.to((0, [origin.as_list(), 0])))
genesis_coin_checker = create_genesis_or_zero_coin_checker(origin_id)
minted_cc_puzzle_hash = cc_puzzle_hash_for_inner_puzzle_hash(CC_MOD, genesis_coin_checker, cc_inner_hash)
tx_record: TransactionRecord = await self.standard_wallet.generate_signed_transaction(
amount, minted_cc_puzzle_hash, uint64(0), origin_id, coins
)
assert tx_record.spend_bundle is not None
lineage_proof: Optional[Program] = lineage_proof_for_genesis(origin)
lineage_proofs = [(origin_id, lineage_proof)]
cc_info: CCInfo = CCInfo(genesis_coin_checker, lineage_proofs)
await self.save_info(cc_info, False)
return tx_record.spend_bundle
async def create_spend_bundle_relative_amount(self, cc_amount, zero_coin: Coin = None) -> Optional[SpendBundle]:
        # If we're losing value then get coloured coins with at least that much value
        # If we're gaining value then our amount doesn't matter (see the worked example after this method)
if cc_amount < 0:
cc_spends = await self.select_coins(abs(cc_amount))
else:
if zero_coin is None:
return None
cc_spends = set()
cc_spends.add(zero_coin)
if cc_spends is None:
return None
# Calculate output amount given relative difference and sum of actual values
spend_value = sum([coin.amount for coin in cc_spends])
cc_amount = spend_value + cc_amount
# Loop through coins and create solution for innerpuzzle
list_of_solutions = []
output_created = None
sigs: List[G2Element] = []
for coin in cc_spends:
if output_created is None:
newinnerpuzhash = await self.get_new_inner_hash()
innersol = self.standard_wallet.make_solution(
primaries=[{"puzzlehash": newinnerpuzhash, "amount": cc_amount}]
)
output_created = coin
else:
innersol = self.standard_wallet.make_solution(consumed=[output_created.name()])
innerpuz: Program = await self.inner_puzzle_for_cc_puzhash(coin.puzzle_hash)
sigs = sigs + await self.get_sigs(innerpuz, innersol, coin.name())
lineage_proof = await self.get_lineage_proof_for_coin(coin)
puzzle_reveal = cc_puzzle_for_inner_puzzle(CC_MOD, self.cc_info.my_genesis_checker, innerpuz)
# Use coin info to create solution and add coin and solution to list of CoinSpends
solution = [
innersol,
coin.as_list(),
lineage_proof,
None,
None,
None,
None,
None,
]
list_of_solutions.append(CoinSpend(coin, puzzle_reveal, Program.to(solution)))
aggsig = AugSchemeMPL.aggregate(sigs)
return SpendBundle(list_of_solutions, aggsig)
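    # --- Illustrative note (not part of the original module) -----------------------
    # Worked example for create_spend_bundle_relative_amount() above: with cc_amount = -100
    # and selected coloured coins summing to 130 units, spend_value = 130 and the single
    # output created is 130 + (-100) = 30 units back to a fresh inner puzzle hash, i.e. the
    # wallet gives up 100 units of the colour. With cc_amount = +100 and a zero-value coin,
    # the output is 0 + 100 = 100 units, i.e. the wallet expects to gain 100 units.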
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/wallet/cc_wallet/cc_wallet.py
| 0.658088 | 0.270325 |
cc_wallet.py
|
pypi
|
from dataclasses import dataclass
from enum import IntEnum
from typing import Optional, Dict
from blspy import G1Element
from salvia.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.program import Program
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.ints import uint32, uint8
from salvia.util.streamable import streamable, Streamable
class PoolSingletonState(IntEnum):
"""
From the user's point of view, a pool group can be in these states:
`SELF_POOLING`: The singleton exists on the blockchain, and we are farming
block rewards to a wallet address controlled by the user
`LEAVING_POOL`: The singleton exists, and we have entered the "escaping" state, which
means we are waiting for a number of blocks = `relative_lock_height` to pass, so we can leave.
`FARMING_TO_POOL`: The singleton exists, and it is assigned to a pool.
    `CLAIMING_SELF_POOLED_REWARDS`: We have submitted a transaction to sweep our
    self-pooled funds (described for context only; it is not a separate member of this enum).
"""
SELF_POOLING = 1
LEAVING_POOL = 2
FARMING_TO_POOL = 3
SELF_POOLING = PoolSingletonState.SELF_POOLING
LEAVING_POOL = PoolSingletonState.LEAVING_POOL
FARMING_TO_POOL = PoolSingletonState.FARMING_TO_POOL
@dataclass(frozen=True)
@streamable
class PoolState(Streamable):
"""
`PoolState` is a type that is serialized to the blockchain to track the state of the user's pool singleton
`target_puzzle_hash` is either the pool address, or the self-pooling address that pool rewards will be paid to.
`target_puzzle_hash` is NOT the p2_singleton puzzle that block rewards are sent to.
The `p2_singleton` address is the initial address, and the `target_puzzle_hash` is the final destination.
`relative_lock_height` is zero when in SELF_POOLING state
"""
version: uint8
state: uint8 # PoolSingletonState
# `target_puzzle_hash`: A puzzle_hash we pay to
# When self-farming, this is a main wallet address
# When farming-to-pool, the pool sends this to the farmer during pool protocol setup
target_puzzle_hash: bytes32 # TODO: rename target_puzzle_hash -> pay_to_address
# owner_pubkey is set by the wallet, once
owner_pubkey: G1Element
pool_url: Optional[str]
relative_lock_height: uint32
def initial_pool_state_from_dict(state_dict: Dict, owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
state_str = state_dict["state"]
singleton_state: PoolSingletonState = PoolSingletonState[state_str]
if singleton_state == SELF_POOLING:
target_puzzle_hash = owner_puzzle_hash
pool_url: str = ""
relative_lock_height = uint32(0)
elif singleton_state == FARMING_TO_POOL:
target_puzzle_hash = bytes32(hexstr_to_bytes(state_dict["target_puzzle_hash"]))
pool_url = state_dict["pool_url"]
relative_lock_height = uint32(state_dict["relative_lock_height"])
else:
raise ValueError("Initial state must be SELF_POOLING or FARMING_TO_POOL")
# TODO: change create_pool_state to return error messages, as well
assert relative_lock_height is not None
return create_pool_state(singleton_state, target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height)
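# --- Illustrative sketch (not part of the original module) ---------------------
# Hedged example of the state_dict shape that initial_pool_state_from_dict() accepts when
# joining a pool; the pool URL, puzzle hash and lock height are invented placeholders
# (the hash is simply reused from the config example elsewhere in this package).
def _example_initial_farming_state(owner_pubkey: G1Element, owner_puzzle_hash: bytes32) -> PoolState:
    example_state_dict = {
        "state": "FARMING_TO_POOL",
        "target_puzzle_hash": "344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58",
        "pool_url": "https://pool.example.com",
        "relative_lock_height": 100,
    }
    # For self-pooling, {"state": "SELF_POOLING"} is enough: the target puzzle hash then
    # defaults to owner_puzzle_hash, pool_url to "" and relative_lock_height to 0.
    return initial_pool_state_from_dict(example_state_dict, owner_pubkey, owner_puzzle_hash)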
def create_pool_state(
state: PoolSingletonState,
target_puzzle_hash: bytes32,
owner_pubkey: G1Element,
pool_url: Optional[str],
relative_lock_height: uint32,
) -> PoolState:
if state not in set(s.value for s in PoolSingletonState):
raise AssertionError("state {state} is not a valid PoolSingletonState,")
ps = PoolState(
POOL_PROTOCOL_VERSION, uint8(state), target_puzzle_hash, owner_pubkey, pool_url, relative_lock_height
)
# TODO Move verify here
return ps
@dataclass(frozen=True)
@streamable
class PoolWalletInfo(Streamable):
"""
    Internal Pool Wallet state, not destined for the blockchain. This can be completely derived from
    the singleton's CoinSpend list, or from the information in the WalletPoolStore.
"""
current: PoolState
target: Optional[PoolState]
launcher_coin: Coin
launcher_id: bytes32
p2_singleton_puzzle_hash: bytes32
current_inner: Program # Inner puzzle in current singleton, not revealed yet
tip_singleton_coin_id: bytes32
singleton_block_height: uint32 # Block height that current PoolState is from
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/pools/pool_wallet_info.py
| 0.726911 | 0.412116 |
pool_wallet_info.py
|
pypi
|
import logging
from dataclasses import dataclass
from pathlib import Path
from typing import List
from blspy import G1Element
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.util.byte_types import hexstr_to_bytes
from salvia.util.config import load_config, save_config
from salvia.util.streamable import Streamable, streamable
"""
Config example
This is what goes into the user's config file, to communicate between the wallet and the farmer processes.
pool_list:
launcher_id: ae4ef3b9bfe68949691281a015a9c16630fc8f66d48c19ca548fb80768791afa
authentication_public_key: 970e181ae45435ae696508a78012dc80548c334cf29676ea6ade7049eb9d2b9579cc30cb44c3fd68d35a250cfbc69e29
owner_public_key: 84c3fcf9d5581c1ddc702cb0f3b4a06043303b334dd993ab42b2c320ebfa98e5ce558448615b3f69638ba92cf7f43da5
payout_instructions: c2b08e41d766da4116e388357ed957d04ad754623a915f3fd65188a8746cf3e8
pool_url: localhost
p2_singleton_puzzle_hash: 2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824
target_puzzle_hash: 344587cf06a39db471d2cc027504e8688a0a67cce961253500c956c73603fd58
""" # noqa
log = logging.getLogger(__name__)
@dataclass(frozen=True)
@streamable
class PoolWalletConfig(Streamable):
launcher_id: bytes32
pool_url: str
payout_instructions: str
target_puzzle_hash: bytes32
p2_singleton_puzzle_hash: bytes32
owner_public_key: G1Element
authentication_public_key: G1Element
def load_pool_config(root_path: Path) -> List[PoolWalletConfig]:
config = load_config(root_path, "config.yaml")
ret_list: List[PoolWalletConfig] = []
if "pool_list" in config["pool"]:
for pool_config_dict in config["pool"]["pool_list"]:
try:
pool_config = PoolWalletConfig(
hexstr_to_bytes(pool_config_dict["launcher_id"]),
pool_config_dict["pool_url"],
pool_config_dict["payout_instructions"],
hexstr_to_bytes(pool_config_dict["target_puzzle_hash"]),
hexstr_to_bytes(pool_config_dict["p2_singleton_puzzle_hash"]),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["owner_public_key"])),
G1Element.from_bytes(hexstr_to_bytes(pool_config_dict["authentication_public_key"])),
)
ret_list.append(pool_config)
except Exception as e:
log.error(f"Exception loading config: {pool_config_dict} {e}")
return ret_list
async def update_pool_config(root_path: Path, pool_config_list: List[PoolWalletConfig]):
full_config = load_config(root_path, "config.yaml")
full_config["pool"]["pool_list"] = [c.to_json_dict() for c in pool_config_list]
save_config(root_path, "config.yaml", full_config)
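# --- Illustrative sketch (not part of the original module) ---------------------
# Hedged example of reading the pool list, replacing one entry's payout instructions and
# writing the list back with the helpers above. The function name and the idea of rewriting
# payout_instructions are illustrative assumptions, not part of the real wallet flow.
async def _example_update_payout_instructions(root_path: Path, launcher_id: bytes32, new_payout_ph_hex: str) -> None:
    updated: List[PoolWalletConfig] = []
    for c in load_pool_config(root_path):
        if c.launcher_id == launcher_id:
            # PoolWalletConfig is frozen, so build a new instance with the changed field
            c = PoolWalletConfig(
                c.launcher_id,
                c.pool_url,
                new_payout_ph_hex,
                c.target_puzzle_hash,
                c.p2_singleton_puzzle_hash,
                c.owner_public_key,
                c.authentication_public_key,
            )
        updated.append(c)
    await update_pool_config(root_path, updated)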
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/pools/pool_config.py
| 0.555797 | 0.221288 |
pool_config.py
|
pypi
|
import logging
import time
from typing import Any, Optional, Set, Tuple, List, Dict
from blspy import PrivateKey, G2Element, G1Element
from salvia.consensus.block_record import BlockRecord
from salvia.pools.pool_config import PoolWalletConfig, load_pool_config, update_pool_config
from salvia.pools.pool_wallet_info import (
PoolWalletInfo,
PoolSingletonState,
PoolState,
FARMING_TO_POOL,
SELF_POOLING,
LEAVING_POOL,
create_pool_state,
)
from salvia.protocols.pool_protocol import POOL_PROTOCOL_VERSION
from salvia.types.announcement import Announcement
from salvia.types.blockchain_format.coin import Coin
from salvia.types.blockchain_format.sized_bytes import bytes32
from salvia.types.blockchain_format.program import Program, SerializedProgram
from salvia.types.coin_record import CoinRecord
from salvia.types.coin_spend import CoinSpend
from salvia.types.spend_bundle import SpendBundle
from salvia.pools.pool_puzzles import (
create_waiting_room_inner_puzzle,
create_full_puzzle,
SINGLETON_LAUNCHER,
create_pooling_inner_puzzle,
solution_to_pool_state,
pool_state_to_inner_puzzle,
get_most_recent_singleton_coin_from_coin_spend,
launcher_id_to_p2_puzzle_hash,
create_travel_spend,
uncurry_pool_member_inner_puzzle,
create_absorb_spend,
is_pool_member_inner_puzzle,
is_pool_waitingroom_inner_puzzle,
uncurry_pool_waitingroom_inner_puzzle,
get_delayed_puz_info_from_launcher_spend,
)
from salvia.util.ints import uint8, uint32, uint64
from salvia.wallet.derive_keys import (
master_sk_to_pooling_authentication_sk,
find_owner_sk,
)
from salvia.wallet.sign_coin_spends import sign_coin_spends
from salvia.wallet.transaction_record import TransactionRecord
from salvia.wallet.util.wallet_types import WalletType
from salvia.wallet.wallet import Wallet
from salvia.wallet.wallet_coin_record import WalletCoinRecord
from salvia.wallet.wallet_info import WalletInfo
from salvia.wallet.util.transaction_type import TransactionType
class PoolWallet:
MINIMUM_INITIAL_BALANCE = 1
MINIMUM_RELATIVE_LOCK_HEIGHT = 5
MAXIMUM_RELATIVE_LOCK_HEIGHT = 1000
wallet_state_manager: Any
log: logging.Logger
wallet_info: WalletInfo
target_state: Optional[PoolState]
next_transaction_fee: uint64
standard_wallet: Wallet
wallet_id: int
singleton_list: List[Coin]
"""
From the user's perspective, this is not a wallet at all, but a way to control
whether their pooling-enabled plots are being self-farmed, or farmed by a pool,
and by which pool. Self-pooling and joint pooling rewards are swept into the
users' regular wallet.
If this wallet is in SELF_POOLING state, the coin ID associated with the current
pool wallet contains the rewards gained while self-farming, so care must be taken
to disallow joining a new pool while we still have money on the pooling singleton UTXO.
Pools can be joined anonymously, without an account or prior signup.
    The ability to change the farm-to target limits abuse by pools, since the user can
    quickly switch pools or return to self-farming.
    The pool is also protected: members cannot cheat by quickly leaving a pool and
    claiming a block that was pledged to it.
The pooling protocol and smart coin prevents a user from quickly leaving a pool
by enforcing a wait time when leaving the pool. A minimum number of blocks must pass
after the user declares that they are leaving the pool, and before they can start to
self-claim rewards again.
Control of switching states is granted to the owner public key.
We reveal the inner_puzzle to the pool during setup of the pooling protocol.
The pool can prove to itself that the inner puzzle pays to the pooling address,
and it can follow state changes in the pooling puzzle by tracing destruction and
    creation of coins associated with this pooling singleton (the singleton controlling
this pool group).
The user trusts the pool to send mining rewards to the <XXX address XXX>
TODO: We should mark which address is receiving funds for our current state.
    If the pool misbehaves, it is the user's responsibility to leave the pool.
It is the Pool's responsibility to claim the rewards sent to the pool_puzzlehash.
The timeout for leaving the pool is expressed in number of blocks from the time
the user expresses their intent to leave.
"""
@classmethod
def type(cls) -> uint8:
return uint8(WalletType.POOLING_WALLET)
def id(self):
return self.wallet_info.id
@classmethod
def _verify_self_pooled(cls, state) -> Optional[str]:
err = ""
if state.pool_url != "":
err += " Unneeded pool_url for self-pooling"
if state.relative_lock_height != 0:
err += " Incorrect relative_lock_height for self-pooling"
return None if err == "" else err
@classmethod
def _verify_pooling_state(cls, state) -> Optional[str]:
err = ""
if state.relative_lock_height < cls.MINIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is less than recommended minimum ({cls.MINIMUM_RELATIVE_LOCK_HEIGHT})"
)
elif state.relative_lock_height > cls.MAXIMUM_RELATIVE_LOCK_HEIGHT:
err += (
f" Pool relative_lock_height ({state.relative_lock_height})"
f"is greater than recommended maximum ({cls.MAXIMUM_RELATIVE_LOCK_HEIGHT})"
)
if state.pool_url in [None, ""]:
err += " Empty pool url in pooling state"
        return None if err == "" else err
@classmethod
def _verify_pool_state(cls, state: PoolState) -> Optional[str]:
if state.target_puzzle_hash is None:
return "Invalid puzzle_hash"
if state.version > POOL_PROTOCOL_VERSION:
return (
f"Detected pool protocol version {state.version}, which is "
f"newer than this wallet's version ({POOL_PROTOCOL_VERSION}). Please upgrade "
f"to use this pooling wallet"
)
if state.state == PoolSingletonState.SELF_POOLING:
return cls._verify_self_pooled(state)
elif state.state == PoolSingletonState.FARMING_TO_POOL or state.state == PoolSingletonState.LEAVING_POOL:
return cls._verify_pooling_state(state)
else:
return "Internal Error"
@classmethod
def _verify_initial_target_state(cls, initial_target_state):
err = cls._verify_pool_state(initial_target_state)
if err:
raise ValueError(f"Invalid internal Pool State: {err}: {initial_target_state}")
async def get_spend_history(self) -> List[Tuple[uint32, CoinSpend]]:
return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)
async def get_current_state(self) -> PoolWalletInfo:
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
all_spends: List[CoinSpend] = [cs for _, cs in history]
# We must have at least the launcher spend
assert len(all_spends) >= 1
launcher_coin: Coin = all_spends[0].coin
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(all_spends[0])
tip_singleton_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(all_spends[-1])
launcher_id: bytes32 = launcher_coin.name()
p2_singleton_puzzle_hash = launcher_id_to_p2_puzzle_hash(launcher_id, delayed_seconds, delayed_puzhash)
assert tip_singleton_coin is not None
curr_spend_i = len(all_spends) - 1
pool_state: Optional[PoolState] = None
last_singleton_spend_height = uint32(0)
while pool_state is None:
full_spend: CoinSpend = all_spends[curr_spend_i]
pool_state = solution_to_pool_state(full_spend)
last_singleton_spend_height = uint32(history[curr_spend_i][0])
curr_spend_i -= 1
assert pool_state is not None
current_inner = pool_state_to_inner_puzzle(
pool_state,
launcher_coin.name(),
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
return PoolWalletInfo(
pool_state,
self.target_state,
launcher_coin,
launcher_id,
p2_singleton_puzzle_hash,
current_inner,
tip_singleton_coin.name(),
last_singleton_spend_height,
)
async def get_unconfirmed_transactions(self) -> List[TransactionRecord]:
return await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
async def get_tip(self) -> Tuple[uint32, CoinSpend]:
return self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)[-1]
async def update_pool_config(self, make_new_authentication_key: bool):
current_state: PoolWalletInfo = await self.get_current_state()
pool_config_list: List[PoolWalletConfig] = load_pool_config(self.wallet_state_manager.root_path)
pool_config_dict: Dict[bytes32, PoolWalletConfig] = {c.launcher_id: c for c in pool_config_list}
existing_config: Optional[PoolWalletConfig] = pool_config_dict.get(current_state.launcher_id, None)
if make_new_authentication_key or existing_config is None:
new_auth_sk: PrivateKey = master_sk_to_pooling_authentication_sk(
self.wallet_state_manager.private_key, uint32(self.wallet_id), uint32(0)
)
auth_pk: G1Element = new_auth_sk.get_g1()
payout_instructions: str = (await self.standard_wallet.get_new_puzzlehash(in_transaction=True)).hex()
else:
auth_pk = existing_config.authentication_public_key
payout_instructions = existing_config.payout_instructions
new_config: PoolWalletConfig = PoolWalletConfig(
current_state.launcher_id,
current_state.current.pool_url if current_state.current.pool_url else "",
payout_instructions,
current_state.current.target_puzzle_hash,
current_state.p2_singleton_puzzle_hash,
current_state.current.owner_pubkey,
auth_pk,
)
pool_config_dict[new_config.launcher_id] = new_config
await update_pool_config(self.wallet_state_manager.root_path, list(pool_config_dict.values()))
@staticmethod
def get_next_interesting_coin_ids(spend: CoinSpend) -> List[bytes32]:
# CoinSpend of one of the coins that we cared about. This coin was spent in a block, but might be in a reorg
# If we return a value, it is a coin ID that we are also interested in (to support two transitions per block)
coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(spend)
if coin is not None:
return [coin.name()]
return []
async def apply_state_transitions(self, block_spends: List[CoinSpend], block_height: uint32):
"""
Updates the Pool state (including DB) with new singleton spends. The block spends can contain many spends
that we are not interested in, and can contain many ephemeral spends. They must all be in the same block.
The DB must be committed after calling this method. All validation should be done here.
"""
coin_name_to_spend: Dict[bytes32, CoinSpend] = {cs.coin.name(): cs for cs in block_spends}
tip: Tuple[uint32, CoinSpend] = await self.get_tip()
tip_height = tip[0]
tip_spend = tip[1]
assert block_height >= tip_height # We should not have a spend with a lesser block height
while True:
tip_coin: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
assert tip_coin is not None
spent_coin_name: bytes32 = tip_coin.name()
if spent_coin_name not in coin_name_to_spend:
break
spend: CoinSpend = coin_name_to_spend[spent_coin_name]
await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, spend, block_height)
tip_spend = (await self.get_tip())[1]
self.log.info(f"New PoolWallet singleton tip_coin: {tip_spend}")
coin_name_to_spend.pop(spent_coin_name)
        # If we have reached the target state, reset it to None. Loop back over the stored spends to find the current state
for _, added_spend in reversed(self.wallet_state_manager.pool_store.get_spends_for_wallet(self.wallet_id)):
latest_state: Optional[PoolState] = solution_to_pool_state(added_spend)
if latest_state is not None:
if self.target_state == latest_state:
self.target_state = None
self.next_transaction_fee = uint64(0)
break
await self.update_pool_config(False)
async def rewind(self, block_height: int) -> bool:
"""
Rolls back all transactions after block_height, and if creation was after block_height, deletes the wallet.
Returns True if the wallet should be removed.
"""
try:
history: List[Tuple[uint32, CoinSpend]] = self.wallet_state_manager.pool_store.get_spends_for_wallet(
self.wallet_id
).copy()
prev_state: PoolWalletInfo = await self.get_current_state()
await self.wallet_state_manager.pool_store.rollback(block_height, self.wallet_id)
if len(history) > 0 and history[0][0] > block_height:
# If we have no entries in the DB, we have no singleton, so we should not have a wallet either
# The PoolWallet object becomes invalid after this.
await self.wallet_state_manager.interested_store.remove_interested_puzzle_hash(
prev_state.p2_singleton_puzzle_hash, in_transaction=True
)
return True
else:
if await self.get_current_state() != prev_state:
await self.update_pool_config(False)
return False
except Exception as e:
self.log.error(f"Exception rewinding: {e}")
return False
@staticmethod
async def create(
wallet_state_manager: Any,
wallet: Wallet,
launcher_coin_id: bytes32,
block_spends: List[CoinSpend],
block_height: uint32,
in_transaction: bool,
name: str = None,
):
"""
This creates a new PoolWallet with only one spend: the launcher spend. The DB MUST be committed after calling
this method.
"""
self = PoolWallet()
self.wallet_state_manager = wallet_state_manager
self.wallet_info = await wallet_state_manager.user_store.create_wallet(
"Pool wallet", WalletType.POOLING_WALLET.value, "", in_transaction=in_transaction
)
self.wallet_id = self.wallet_info.id
self.standard_wallet = wallet
self.target_state = None
self.next_transaction_fee = uint64(0)
self.log = logging.getLogger(name if name else __name__)
launcher_spend: Optional[CoinSpend] = None
for spend in block_spends:
if spend.coin.name() == launcher_coin_id:
launcher_spend = spend
assert launcher_spend is not None
await self.wallet_state_manager.pool_store.add_spend(self.wallet_id, launcher_spend, block_height)
await self.update_pool_config(True)
p2_puzzle_hash: bytes32 = (await self.get_current_state()).p2_singleton_puzzle_hash
await self.wallet_state_manager.interested_store.add_interested_puzzle_hash(
p2_puzzle_hash, self.wallet_id, True
)
await self.wallet_state_manager.add_new_wallet(self, self.wallet_info.id, create_puzzle_hashes=False)
self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
return self
@staticmethod
async def create_from_db(
wallet_state_manager: Any,
wallet: Wallet,
wallet_info: WalletInfo,
name: str = None,
):
"""
This creates a PoolWallet from DB. However, all data is already handled by WalletPoolStore, so we don't need
to do anything here.
"""
self = PoolWallet()
self.wallet_state_manager = wallet_state_manager
self.wallet_id = wallet_info.id
self.standard_wallet = wallet
self.wallet_info = wallet_info
self.target_state = None
self.log = logging.getLogger(name if name else __name__)
self.wallet_state_manager.set_new_peak_callback(self.wallet_id, self.new_peak)
return self
@staticmethod
async def create_new_pool_wallet_transaction(
wallet_state_manager: Any,
main_wallet: Wallet,
initial_target_state: PoolState,
fee: uint64 = uint64(0),
p2_singleton_delay_time: Optional[uint64] = None,
p2_singleton_delayed_ph: Optional[bytes32] = None,
) -> Tuple[TransactionRecord, bytes32, bytes32]:
"""
A "plot NFT", or pool wallet, represents the idea of a set of plots that all pay to
the same pooling puzzle. This puzzle is a `salvia singleton` that is
parameterized with a public key controlled by the user's wallet
(a `smart coin`). It contains an inner puzzle that can switch between
paying block rewards to a pool, or to a user's own wallet.
        Call this under the wallet state manager lock.
"""
amount = 1
standard_wallet = main_wallet
if p2_singleton_delayed_ph is None:
p2_singleton_delayed_ph = await main_wallet.get_new_puzzlehash()
if p2_singleton_delay_time is None:
p2_singleton_delay_time = uint64(604800)
unspent_records = await wallet_state_manager.coin_store.get_unspent_coins_for_wallet(standard_wallet.wallet_id)
balance = await standard_wallet.get_confirmed_balance(unspent_records)
if balance < PoolWallet.MINIMUM_INITIAL_BALANCE:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool.")
if balance < fee:
raise ValueError("Not enough balance in main wallet to create a managed plotting pool with fee {fee}.")
# Verify Parameters - raise if invalid
PoolWallet._verify_initial_target_state(initial_target_state)
spend_bundle, singleton_puzzle_hash, launcher_coin_id = await PoolWallet.generate_launcher_spend(
standard_wallet,
uint64(1),
initial_target_state,
wallet_state_manager.constants.GENESIS_CHALLENGE,
p2_singleton_delay_time,
p2_singleton_delayed_ph,
)
if spend_bundle is None:
raise ValueError("failed to generate ID for wallet")
standard_wallet_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=singleton_puzzle_hash,
amount=uint64(amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=wallet_state_manager.main_wallet.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await standard_wallet.push_transaction(standard_wallet_record)
p2_singleton_puzzle_hash: bytes32 = launcher_id_to_p2_puzzle_hash(
launcher_coin_id, p2_singleton_delay_time, p2_singleton_delayed_ph
)
return standard_wallet_record, p2_singleton_puzzle_hash, launcher_coin_id
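    # Hedged usage sketch (wsm, main_wallet and target_pool_state are
    # placeholders for objects the caller already holds; this is illustrative,
    # not prescriptive):
    #
    #     tx_record, p2_singleton_ph, launcher_id = (
    #         await PoolWallet.create_new_pool_wallet_transaction(
    #             wsm, main_wallet, target_pool_state, fee=uint64(0)
    #         )
    #     )
    #     # launcher_id identifies the new plot NFT; p2_singleton_ph is the
    #     # puzzle hash that plots should pay to.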
async def sign(self, coin_spend: CoinSpend) -> SpendBundle:
async def pk_to_sk(pk: G1Element) -> PrivateKey:
owner_sk: Optional[PrivateKey] = await find_owner_sk([self.wallet_state_manager.private_key], pk)
assert owner_sk is not None
return owner_sk
return await sign_coin_spends(
[coin_spend],
pk_to_sk,
self.wallet_state_manager.constants.AGG_SIG_ME_ADDITIONAL_DATA,
self.wallet_state_manager.constants.MAX_BLOCK_COST_CLVM,
)
async def generate_travel_transaction(self, fee: uint64) -> TransactionRecord:
# target_state is contained within pool_wallet_state
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
spend_history = await self.get_spend_history()
last_coin_spend: CoinSpend = spend_history[-1][1]
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(spend_history[0][1])
assert pool_wallet_info.target is not None
next_state = pool_wallet_info.target
if pool_wallet_info.current.state in [FARMING_TO_POOL]:
next_state = create_pool_state(
LEAVING_POOL,
pool_wallet_info.current.target_puzzle_hash,
pool_wallet_info.current.owner_pubkey,
pool_wallet_info.current.pool_url,
pool_wallet_info.current.relative_lock_height,
)
new_inner_puzzle = pool_state_to_inner_puzzle(
next_state,
pool_wallet_info.launcher_coin.name(),
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
new_full_puzzle: SerializedProgram = SerializedProgram.from_program(
create_full_puzzle(new_inner_puzzle, pool_wallet_info.launcher_coin.name())
)
outgoing_coin_spend, inner_puzzle = create_travel_spend(
last_coin_spend,
pool_wallet_info.launcher_coin,
pool_wallet_info.current,
next_state,
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
tip = (await self.get_tip())[1]
tip_coin = tip.coin
singleton = tip.additions()[0]
singleton_id = singleton.name()
assert outgoing_coin_spend.coin.parent_coin_info == tip_coin.name()
assert outgoing_coin_spend.coin.name() == singleton_id
assert new_inner_puzzle != inner_puzzle
if is_pool_member_inner_puzzle(inner_puzzle):
(
inner_f,
target_puzzle_hash,
p2_singleton_hash,
pubkey_as_program,
pool_reward_prefix,
escape_puzzle_hash,
) = uncurry_pool_member_inner_puzzle(inner_puzzle)
pk_bytes: bytes = bytes(pubkey_as_program.as_atom())
assert len(pk_bytes) == 48
owner_pubkey = G1Element.from_bytes(pk_bytes)
assert owner_pubkey == pool_wallet_info.current.owner_pubkey
elif is_pool_waitingroom_inner_puzzle(inner_puzzle):
(
target_puzzle_hash, # payout_puzzle_hash
relative_lock_height,
owner_pubkey,
p2_singleton_hash,
) = uncurry_pool_waitingroom_inner_puzzle(inner_puzzle)
pk_bytes = bytes(owner_pubkey.as_atom())
assert len(pk_bytes) == 48
assert owner_pubkey == pool_wallet_info.current.owner_pubkey
else:
raise RuntimeError("Invalid state")
signed_spend_bundle = await self.sign(outgoing_coin_spend)
assert signed_spend_bundle.removals()[0].puzzle_hash == singleton.puzzle_hash
assert signed_spend_bundle.removals()[0].name() == singleton.name()
assert signed_spend_bundle is not None
tx_record = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=new_full_puzzle.get_tree_hash(),
amount=uint64(1),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=signed_spend_bundle,
additions=signed_spend_bundle.additions(),
removals=signed_spend_bundle.removals(),
wallet_id=self.id(),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=signed_spend_bundle.name(),
)
return tx_record
@staticmethod
async def generate_launcher_spend(
standard_wallet: Wallet,
amount: uint64,
initial_target_state: PoolState,
genesis_challenge: bytes32,
delay_time: uint64,
delay_ph: bytes32,
) -> Tuple[SpendBundle, bytes32, bytes32]:
"""
Creates the initial singleton, which includes spending an origin coin, the launcher, and creating a singleton
with the "pooling" inner state, which can be either self pooling or using a pool
"""
coins: Set[Coin] = await standard_wallet.select_coins(amount)
if coins is None:
raise ValueError("Not enough coins to create pool wallet")
assert len(coins) == 1
launcher_parent: Coin = coins.copy().pop()
genesis_launcher_puz: Program = SINGLETON_LAUNCHER
launcher_coin: Coin = Coin(launcher_parent.name(), genesis_launcher_puz.get_tree_hash(), amount)
        escaping_inner_puzzle: Program = create_waiting_room_inner_puzzle(
initial_target_state.target_puzzle_hash,
initial_target_state.relative_lock_height,
initial_target_state.owner_pubkey,
launcher_coin.name(),
genesis_challenge,
delay_time,
delay_ph,
)
escaping_inner_puzzle_hash = escaping_inner_puzzle.get_tree_hash()
self_pooling_inner_puzzle: Program = create_pooling_inner_puzzle(
initial_target_state.target_puzzle_hash,
escaping_inner_puzzle_hash,
initial_target_state.owner_pubkey,
launcher_coin.name(),
genesis_challenge,
delay_time,
delay_ph,
)
if initial_target_state.state == SELF_POOLING:
puzzle = escaping_inner_puzzle
elif initial_target_state.state == FARMING_TO_POOL:
puzzle = self_pooling_inner_puzzle
else:
raise ValueError("Invalid initial state")
full_pooling_puzzle: Program = create_full_puzzle(puzzle, launcher_id=launcher_coin.name())
puzzle_hash: bytes32 = full_pooling_puzzle.get_tree_hash()
pool_state_bytes = Program.to([("p", bytes(initial_target_state)), ("t", delay_time), ("h", delay_ph)])
announcement_set: Set[bytes32] = set()
announcement_message = Program.to([puzzle_hash, amount, pool_state_bytes]).get_tree_hash()
announcement_set.add(Announcement(launcher_coin.name(), announcement_message).name())
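        # The origin-coin spend generated below asserts this announcement; the
        # launcher puzzle announces the tree hash of its solution, so the bundle
        # only validates when the launcher is spent with the matching
        # (puzzle_hash, amount, pool_state) solution constructed further down.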
create_launcher_tx_record: Optional[TransactionRecord] = await standard_wallet.generate_signed_transaction(
amount,
genesis_launcher_puz.get_tree_hash(),
uint64(0),
None,
coins,
None,
False,
announcement_set,
)
assert create_launcher_tx_record is not None and create_launcher_tx_record.spend_bundle is not None
genesis_launcher_solution: Program = Program.to([puzzle_hash, amount, pool_state_bytes])
launcher_cs: CoinSpend = CoinSpend(
launcher_coin,
SerializedProgram.from_program(genesis_launcher_puz),
SerializedProgram.from_program(genesis_launcher_solution),
)
launcher_sb: SpendBundle = SpendBundle([launcher_cs], G2Element())
# Current inner will be updated when state is verified on the blockchain
full_spend: SpendBundle = SpendBundle.aggregate([create_launcher_tx_record.spend_bundle, launcher_sb])
return full_spend, puzzle_hash, launcher_coin.name()
async def join_pool(self, target_state: PoolState, fee: uint64) -> Tuple[uint64, TransactionRecord]:
if target_state.state != FARMING_TO_POOL:
raise ValueError(f"join_pool must be called with target_state={FARMING_TO_POOL} (FARMING_TO_POOL)")
if self.target_state is not None:
raise ValueError(f"Cannot join a pool while waiting for target state: {self.target_state}")
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot join pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
current_state: PoolWalletInfo = await self.get_current_state()
total_fee = fee
if current_state.current == target_state:
self.target_state = None
msg = f"Asked to change to current state. Target = {target_state}"
self.log.info(msg)
raise ValueError(msg)
elif current_state.current.state in [SELF_POOLING, LEAVING_POOL]:
total_fee = fee
elif current_state.current.state == FARMING_TO_POOL:
total_fee = uint64(fee * 2)
if self.target_state is not None:
raise ValueError(
f"Cannot change to state {target_state} when already having target state: {self.target_state}"
)
PoolWallet._verify_initial_target_state(target_state)
if current_state.current.state == LEAVING_POOL:
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
last_height: uint32 = history[-1][0]
if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
raise ValueError(
f"Cannot join a pool until height {last_height + current_state.current.relative_lock_height}"
)
self.target_state = target_state
self.next_transaction_fee = fee
tx_record: TransactionRecord = await self.generate_travel_transaction(fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
return total_fee, tx_record
async def self_pool(self, fee: uint64) -> Tuple[uint64, TransactionRecord]:
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot self pool due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
if pool_wallet_info.current.state == SELF_POOLING:
raise ValueError("Attempted to self pool when already self pooling")
if self.target_state is not None:
raise ValueError(f"Cannot self pool when already having target state: {self.target_state}")
# Note the implications of getting owner_puzzlehash from our local wallet right now
# vs. having pre-arranged the target self-pooling address
owner_puzzlehash = await self.standard_wallet.get_new_puzzlehash()
owner_pubkey = pool_wallet_info.current.owner_pubkey
current_state: PoolWalletInfo = await self.get_current_state()
total_fee = uint64(fee * 2)
if current_state.current.state == LEAVING_POOL:
total_fee = fee
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
last_height: uint32 = history[-1][0]
if self.wallet_state_manager.get_peak().height <= last_height + current_state.current.relative_lock_height:
raise ValueError(
f"Cannot self pool until height {last_height + current_state.current.relative_lock_height}"
)
self.target_state = create_pool_state(
SELF_POOLING, owner_puzzlehash, owner_pubkey, pool_url=None, relative_lock_height=uint32(0)
)
self.next_transaction_fee = fee
tx_record = await self.generate_travel_transaction(fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
return total_fee, tx_record
async def claim_pool_rewards(self, fee: uint64) -> TransactionRecord:
# Search for p2_puzzle_hash coins, and spend them with the singleton
if await self.have_unconfirmed_transaction():
raise ValueError(
"Cannot claim due to unconfirmed transaction. If this is stuck, delete the unconfirmed transaction."
)
unspent_coin_records: List[CoinRecord] = list(
await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
)
if len(unspent_coin_records) == 0:
raise ValueError("Nothing to claim, no transactions to p2_singleton_puzzle_hash")
farming_rewards: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_farming_rewards()
coin_to_height_farmed: Dict[Coin, uint32] = {}
for tx_record in farming_rewards:
height_farmed: Optional[uint32] = tx_record.height_farmed(
self.wallet_state_manager.constants.GENESIS_CHALLENGE
)
assert height_farmed is not None
coin_to_height_farmed[tx_record.additions[0]] = height_farmed
history: List[Tuple[uint32, CoinSpend]] = await self.get_spend_history()
assert len(history) > 0
delayed_seconds, delayed_puzhash = get_delayed_puz_info_from_launcher_spend(history[0][1])
current_state: PoolWalletInfo = await self.get_current_state()
last_solution: CoinSpend = history[-1][1]
all_spends: List[CoinSpend] = []
total_amount = 0
for coin_record in unspent_coin_records:
if coin_record.coin not in coin_to_height_farmed:
continue
if len(all_spends) >= 100:
# Limit the total number of spends, so it fits into the block
break
absorb_spend: List[CoinSpend] = create_absorb_spend(
last_solution,
current_state.current,
current_state.launcher_coin,
coin_to_height_farmed[coin_record.coin],
self.wallet_state_manager.constants.GENESIS_CHALLENGE,
delayed_seconds,
delayed_puzhash,
)
last_solution = absorb_spend[0]
all_spends += absorb_spend
total_amount += coin_record.coin.amount
self.log.info(
f"Farmer coin: {coin_record.coin} {coin_record.coin.name()} {coin_to_height_farmed[coin_record.coin]}"
)
if len(all_spends) == 0:
raise ValueError("Nothing to claim, no unspent coinbase rewards")
# No signatures are required to absorb
spend_bundle: SpendBundle = SpendBundle(all_spends, G2Element())
absorb_transaction: TransactionRecord = TransactionRecord(
confirmed_at_height=uint32(0),
created_at_time=uint64(int(time.time())),
to_puzzle_hash=current_state.current.target_puzzle_hash,
amount=uint64(total_amount),
fee_amount=fee,
confirmed=False,
sent=uint32(0),
spend_bundle=spend_bundle,
additions=spend_bundle.additions(),
removals=spend_bundle.removals(),
wallet_id=uint32(self.wallet_id),
sent_to=[],
trade_id=None,
type=uint32(TransactionType.OUTGOING_TX.value),
name=spend_bundle.name(),
)
await self.wallet_state_manager.add_pending_transaction(absorb_transaction)
return absorb_transaction
async def new_peak(self, peak: BlockRecord) -> None:
# This gets called from the WalletStateManager whenever there is a new peak
pool_wallet_info: PoolWalletInfo = await self.get_current_state()
tip_height, tip_spend = await self.get_tip()
if self.target_state is None:
return
        if self.target_state == pool_wallet_info.current:
self.target_state = None
raise ValueError("Internal error")
if (
self.target_state.state in [FARMING_TO_POOL, SELF_POOLING]
and pool_wallet_info.current.state == LEAVING_POOL
):
leave_height = tip_height + pool_wallet_info.current.relative_lock_height
curr: BlockRecord = peak
while not curr.is_transaction_block:
curr = self.wallet_state_manager.blockchain.block_record(curr.prev_hash)
self.log.info(f"Last transaction block height: {curr.height} OK to leave at height {leave_height}")
# Add some buffer (+2) to reduce chances of a reorg
if curr.height > leave_height + 2:
unconfirmed: List[
TransactionRecord
] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(self.wallet_id)
next_tip: Optional[Coin] = get_most_recent_singleton_coin_from_coin_spend(tip_spend)
assert next_tip is not None
if any([rem.name() == next_tip.name() for tx_rec in unconfirmed for rem in tx_rec.removals]):
self.log.info("Already submitted second transaction, will not resubmit.")
return
self.log.info(f"Attempting to leave from\n{pool_wallet_info.current}\nto\n{self.target_state}")
assert self.target_state.version == POOL_PROTOCOL_VERSION
assert pool_wallet_info.current.state == LEAVING_POOL
assert self.target_state.target_puzzle_hash is not None
if self.target_state.state == SELF_POOLING:
assert self.target_state.relative_lock_height == 0
assert self.target_state.pool_url is None
elif self.target_state.state == FARMING_TO_POOL:
assert self.target_state.relative_lock_height >= self.MINIMUM_RELATIVE_LOCK_HEIGHT
assert self.target_state.pool_url is not None
tx_record = await self.generate_travel_transaction(self.next_transaction_fee)
await self.wallet_state_manager.add_pending_transaction(tx_record)
async def have_unconfirmed_transaction(self) -> bool:
unconfirmed: List[TransactionRecord] = await self.wallet_state_manager.tx_store.get_unconfirmed_for_wallet(
self.wallet_id
)
return len(unconfirmed) > 0
async def get_confirmed_balance(self, _=None) -> uint64:
amount: uint64 = uint64(0)
if (await self.get_current_state()).current.state == SELF_POOLING:
unspent_coin_records: List[WalletCoinRecord] = list(
await self.wallet_state_manager.coin_store.get_unspent_coins_for_wallet(self.wallet_id)
)
for record in unspent_coin_records:
if record.coinbase:
amount = uint64(amount + record.coin.amount)
return amount
async def get_unconfirmed_balance(self, record_list=None) -> uint64:
return await self.get_confirmed_balance(record_list)
async def get_spendable_balance(self, record_list=None) -> uint64:
return await self.get_confirmed_balance(record_list)
async def get_pending_change_balance(self) -> uint64:
return uint64(0)
async def get_max_send_amount(self, record_list=None) -> uint64:
return uint64(0)
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/pools/pool_wallet.py
| 0.725649 | 0.281412 |
pool_wallet.py
|
pypi
|
import logging
from blspy import PrivateKey
from salvia.cmds.init_funcs import check_keys
from salvia.util.keychain import Keychain
from pathlib import Path
from typing import Any, Dict, List, Optional, cast
# Commands that are handled by the KeychainServer
keychain_commands = [
"add_private_key",
"check_keys",
"delete_all_keys",
"delete_key_by_fingerprint",
"get_all_private_keys",
"get_first_private_key",
"get_key_for_fingerprint",
]
log = logging.getLogger(__name__)
KEYCHAIN_ERR_KEYERROR = "key error"
KEYCHAIN_ERR_LOCKED = "keyring is locked"
KEYCHAIN_ERR_NO_KEYS = "no keys present"
KEYCHAIN_ERR_MALFORMED_REQUEST = "malformed request"
class KeychainServer:
"""
Implements a remote keychain service for clients to perform key operations on
"""
def __init__(self):
self._default_keychain = Keychain()
self._alt_keychains = {}
def get_keychain_for_request(self, request: Dict[str, Any]):
"""
Keychain instances can have user and service strings associated with them.
The keychain backends ultimately point to the same data stores, but the user
and service strings are used to partition those data stores. We attempt to
maintain a mapping of user/service pairs to their corresponding Keychain.
"""
keychain = None
user = request.get("kc_user", self._default_keychain.user)
service = request.get("kc_service", self._default_keychain.service)
if user == self._default_keychain.user and service == self._default_keychain.service:
keychain = self._default_keychain
else:
key = (user or "unnamed") + (service or "")
if key in self._alt_keychains:
keychain = self._alt_keychains[key]
else:
keychain = Keychain(user=user, service=service)
self._alt_keychains[key] = keychain
return keychain
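    # Hedged example of the request routing described above (field values are
    # placeholders): a request carrying explicit "kc_user"/"kc_service" keys is
    # routed to its own Keychain instance, anything else uses the default one.
    #
    #     keychain = server.get_keychain_for_request(
    #         {"kc_user": "alice", "kc_service": "salvia-alt"}
    #     )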
async def handle_command(self, command, data) -> Dict[str, Any]:
if command == "add_private_key":
return await self.add_private_key(cast(Dict[str, Any], data))
elif command == "check_keys":
return await self.check_keys(cast(Dict[str, Any], data))
elif command == "delete_all_keys":
return await self.delete_all_keys(cast(Dict[str, Any], data))
elif command == "delete_key_by_fingerprint":
return await self.delete_key_by_fingerprint(cast(Dict[str, Any], data))
elif command == "get_all_private_keys":
return await self.get_all_private_keys(cast(Dict[str, Any], data))
elif command == "get_first_private_key":
return await self.get_first_private_key(cast(Dict[str, Any], data))
elif command == "get_key_for_fingerprint":
return await self.get_key_for_fingerprint(cast(Dict[str, Any], data))
return {}
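    # Hedged request/response sketch for the dispatch above (the fingerprint is
    # a placeholder value):
    #
    #     response = await server.handle_command(
    #         "get_key_for_fingerprint", {"fingerprint": 1234567890}
    #     )
    #     # -> {"success": True, "pk": "...", "entropy": "..."} on success, or
    #     #    {"success": False, "error": ...} otherwise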
async def add_private_key(self, request: Dict[str, Any]) -> Dict[str, Any]:
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
mnemonic = request.get("mnemonic", None)
passphrase = request.get("passphrase", None)
if mnemonic is None or passphrase is None:
return {
"success": False,
"error": KEYCHAIN_ERR_MALFORMED_REQUEST,
"error_details": {"message": "missing mnemonic and/or passphrase"},
}
try:
self.get_keychain_for_request(request).add_private_key(mnemonic, passphrase)
except KeyError as e:
return {
"success": False,
"error": KEYCHAIN_ERR_KEYERROR,
"error_details": {"message": f"The word '{e.args[0]}' is incorrect.'", "word": e.args[0]},
}
return {"success": True}
async def check_keys(self, request: Dict[str, Any]) -> Dict[str, Any]:
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
root_path = request.get("root_path", None)
if root_path is None:
return {
"success": False,
"error": KEYCHAIN_ERR_MALFORMED_REQUEST,
"error_details": {"message": "missing root_path"},
}
check_keys(Path(root_path))
return {"success": True}
async def delete_all_keys(self, request: Dict[str, Any]) -> Dict[str, Any]:
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
self.get_keychain_for_request(request).delete_all_keys()
return {"success": True}
async def delete_key_by_fingerprint(self, request: Dict[str, Any]) -> Dict[str, Any]:
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
fingerprint = request.get("fingerprint", None)
if fingerprint is None:
return {
"success": False,
"error": KEYCHAIN_ERR_MALFORMED_REQUEST,
"error_details": {"message": "missing fingerprint"},
}
self.get_keychain_for_request(request).delete_key_by_fingerprint(fingerprint)
return {"success": True}
async def get_all_private_keys(self, request: Dict[str, Any]) -> Dict[str, Any]:
all_keys: List[Dict[str, Any]] = []
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
private_keys = self.get_keychain_for_request(request).get_all_private_keys()
for sk, entropy in private_keys:
all_keys.append({"pk": bytes(sk.get_g1()).hex(), "entropy": entropy.hex()})
return {"success": True, "private_keys": all_keys}
async def get_first_private_key(self, request: Dict[str, Any]) -> Dict[str, Any]:
key: Dict[str, Any] = {}
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
sk_ent = self.get_keychain_for_request(request).get_first_private_key()
if sk_ent is None:
return {"success": False, "error": KEYCHAIN_ERR_NO_KEYS}
pk_str = bytes(sk_ent[0].get_g1()).hex()
ent_str = sk_ent[1].hex()
key = {"pk": pk_str, "entropy": ent_str}
return {"success": True, "private_key": key}
async def get_key_for_fingerprint(self, request: Dict[str, Any]) -> Dict[str, Any]:
if self.get_keychain_for_request(request).is_keyring_locked():
return {"success": False, "error": KEYCHAIN_ERR_LOCKED}
private_keys = self.get_keychain_for_request(request).get_all_private_keys()
if len(private_keys) == 0:
return {"success": False, "error": KEYCHAIN_ERR_NO_KEYS}
fingerprint = request.get("fingerprint", None)
private_key: Optional[PrivateKey] = None
entropy: Optional[bytes] = None
if fingerprint is not None:
for sk, entropy in private_keys:
if sk.get_g1().get_fingerprint() == fingerprint:
private_key = sk
break
else:
private_key, entropy = private_keys[0]
if not private_key or not entropy:
return {"success": False, "error": KEYCHAIN_ERR_NO_KEYS}
else:
return {"success": True, "pk": bytes(private_key.get_g1()).hex(), "entropy": entropy.hex()}
|
/salvia-blockchain-0.7.7.tar.gz/salvia-blockchain-0.7.7/salvia/daemon/keychain_server.py
| 0.752013 | 0.209197 |
keychain_server.py
|
pypi
|
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
from .connectivity import connectivity_2D
from .global_numbering import compress_points_connectivity
from .unstructured_mesh import UnstructuredMesh
class StructuredGrid2D(object):
def __init__(self, x, y, mask=None, hmax=None, element_type=1):
"""
x and y are coordinates of the points, mask is elementwise
"""
self.x = x.copy()
self.y = y.copy()
self.nelem_lat = x.shape[0] - 1
self.nelem_rad = x.shape[1] - 1
if isinstance(hmax, float):
self.hmax = \
np.ones((self.nelem_lat, self.nelem_rad)) * hmax
elif isinstance(hmax, np.ndarray):
if hmax.shape == (self.nelem_lat, self.nelem_rad):
self.hmax = hmax.copy()
elif hmax.shape == (self.nelem_rad,):
self.hmax = hmax.repeat(self.nelem_lat).reshape(
(self.nelem_rad, self.nelem_lat)).T
else:
raise ValueError('shape of hmax does not match')
else:
self.hmax = None
if mask is None:
self.mask = np.zeros(np.array(x.shape) - 1, dtype='bool')
else:
self.mask = mask.copy()
if isinstance(element_type, int):
self.element_type = np.ones((self.nelem_lat, self.nelem_rad),
dtype='int') * element_type
else:
self.element_type = np.array(element_type)
@classmethod
def shell(self, r_inner, r_outer, nelem_lat, nelem_rad, min_colat=0.,
max_colat=180., hmax=None):
"""
generate a simple spherical structured grid
"""
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.rectangle(
nelem_lat, nelem_rad, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), min_y=r_inner, max_y=r_outer,
hmax=hmax)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
return sg
@classmethod
def shell_vertical_refine(self, r_inner, r_outer, nelem_lat, nelem_rad,
min_colat=0., max_colat=180., hmax=None, p1=0.6,
p2=0.8):
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.rectangle_vertical_refine(
nelem_lat, nelem_rad, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), min_y=r_inner, max_y=r_outer,
hmax=hmax, p1=p1, p2=p2)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
return sg
@classmethod
def shell_vertical_refine_doubling(self, r_inner, r_outer, nelem_lat,
nelem_rad, min_colat=0., max_colat=180.,
hmax=None, p1=0.65, p2=0.8):
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.rectangle_vertical_refine_doubling(
nelem_lat, nelem_rad, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), min_y=r_inner, max_y=r_outer,
hmax=hmax, p1=p1, p2=p2)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
return sg
@classmethod
def shell_radii(self, radius, nelem_lat, min_colat=0., max_colat=180.,
hmax=None):
"""
generate a simple spherical structured grid with uneven radial spacing
"""
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.rectangle_y(
radius, nelem_lat, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), hmax=hmax)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
return sg
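    # Hedged usage sketch (radii and element counts are arbitrary): build a
    # quarter shell with explicitly prescribed radial layering and convert it
    # to an unstructured mesh.
    #
    #     radii = np.linspace(0.5, 1.0, 5)
    #     sg = StructuredGrid2D.shell_radii(radii, nelem_lat=8, max_colat=90.)
    #     mesh = sg.get_unstructured_mesh()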
@classmethod
def spherical_doubling_layer(self, r_inner, r_outer, nelem_lat,
min_colat=0., max_colat=180., p1=0.5,
hmax=None, flip_vertical=False):
"""
generate a simple spherical structured grid
nelem_lat is the element number on the inner side
"""
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.cartesian_doubling_layer(
nelem_lat, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), min_y=r_inner, max_y=r_outer,
hmax=hmax, p1=p1, flip_vertical=flip_vertical)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
sg.element_type[1::4, :] = 2
sg.element_type[2::4, :] = 2
return sg
@classmethod
def spherical_tripling_layer(self, r_inner, r_outer, nelem_lat,
min_colat=0., max_colat=180., hmax=None,
flip_vertical=False):
raise ValueError('This feature is not included in the free SalvusMesher version.')
@classmethod
def spherical_axisem_tripling_layer(self, r_inner, r_outer, nelem_lat,
min_colat=0., max_colat=180.,
hmax=None, flip_vertical=False):
"""
generate a simple spherical structured grid with tripling such that no
element has a single point on the axis (needed for GLJ quadrature in
AxiSEM)
        nelem_lat is the element number on the inner side, the number of
        elements on the outer side is nelem_lat * 3 - 2
"""
if not 0. < max_colat <= 180.: # pragma: no cover
raise ValueError('max_colat needs to be in (0., 180.]')
sg = StructuredGrid2D.cartesian_axisem_tripling_layer(
nelem_lat, min_x=np.deg2rad(min_colat),
max_x=np.deg2rad(max_colat), min_y=r_inner, max_y=r_outer,
hmax=hmax, flip_vertical=flip_vertical)
sg.x, sg.y = sg.y * np.sin(sg.x), sg.y * np.cos(sg.x)
sg.element_type[:] = 0
sg.element_type[1::3, :] = 2
sg.element_type[2::3, :] = 2
return sg
@classmethod
def central_sphere_full(self, r_outer, nelem_lat, hmax=None):
sg1 = self.central_sphere(r_outer, nelem_lat, full=True, hmax=hmax,
left=False)
sg2 = self.central_sphere(r_outer, nelem_lat, full=True, hmax=hmax,
left=True)
return sg1 + sg2
@classmethod
def central_sphere(self, r_outer, nelem_lat, full=False, hmax=None,
left=False):
"""
generate a central mesh of a quarter or half circle
nelem_lat is the element number along the latitude for the half circle
returns two structured grids
"""
if not nelem_lat % 2 == 0: # pragma: no cover
raise ValueError('nelem_lat should be even')
if nelem_lat == 2:
isqrt2 = 1. / np.sqrt(2)
p = 0.4
x_mesh = np.array([[0., 0.], [0.5, p]]) * r_outer
y_mesh = np.array([[0., 0.5], [0., p]]) * r_outer
x_mesh_buffer = \
np.array([[0., isqrt2, 1.], [0., p, 0.5]]) * r_outer
y_mesh_buffer = \
np.array([[1., isqrt2, 0.], [0.5, p, 0.]]) * r_outer
else:
# set up parameters
nelem_square = int(nelem_lat / 2)
nelem_buffer = int(np.ceil(nelem_lat * (2. / np.pi - 0.5)))
nelem_rad = nelem_buffer + nelem_square
r_2 = (r_outer * nelem_square) / nelem_rad
r_3 = (r_2 + r_outer) / 2. ** 1.5
# build square
x = np.linspace(0., r_2, nelem_square + 1)
y = np.linspace(0., r_2, nelem_square + 1)
x_mesh, y_mesh = np.meshgrid(x, y, indexing='ij')
# deform square linearly with cosine boundaries on top and right
dx = (1 - np.cos(x_mesh / r_2 * np.pi / 2.))
dy = (1 - np.cos(y_mesh / r_2 * np.pi / 2.))
x_mesh += -dx * dy * (r_2 - r_3)
y_mesh += -dx * dy * (r_2 - r_3)
# add buffer layer
angle = np.linspace(0., np.pi / 2, nelem_square * 2 + 1)
x_buffer = r_outer * np.sin(angle)
y_buffer = r_outer * np.cos(angle)
x_square_surf = np.concatenate((x_mesh[:, -1], x_mesh[-1, -2::-1]))
y_square_surf = np.concatenate((y_mesh[:, -1], y_mesh[-1, -2::-1]))
x_mesh_buffer = np.zeros((nelem_buffer + 1, nelem_square * 2 + 1))
y_mesh_buffer = np.zeros((nelem_buffer + 1, nelem_square * 2 + 1))
            # linearly map between the circle and the outer layer of the square
for i in np.arange(nelem_buffer + 1):
w1 = float(i) / nelem_buffer
w2 = 1 - w1
x_mesh_buffer[i, :] = w1 * x_square_surf + w2 * x_buffer
y_mesh_buffer[i, :] = w1 * y_square_surf + w2 * y_buffer
if full:
x_mesh = np.concatenate((x_mesh.T[::-1, :], x_mesh.T[1:, :])).T
y_mesh = np.concatenate((-y_mesh.T[::-1, :], y_mesh.T[1:, :])).T
x_mesh_buffer = np.concatenate((x_mesh_buffer.T[:-1, :],
x_mesh_buffer.T[::-1, :])).T
y_mesh_buffer = np.concatenate((y_mesh_buffer.T[:-1, :],
-y_mesh_buffer.T[::-1, :])).T
element_type_buffer = np.ones(
(x_mesh_buffer.shape[0] - 1, x_mesh_buffer.shape[1] - 1),
dtype='int')
element_type_buffer[0, :] = 2
if left:
x_mesh = -1 * x_mesh[:, ::-1]
y_mesh = y_mesh[:, ::-1]
x_mesh_buffer = -1 * x_mesh_buffer[:, ::-1]
y_mesh_buffer = y_mesh_buffer[:, ::-1]
element_type_buffer = element_type_buffer[:, ::-1]
return (self(x_mesh, y_mesh, hmax=hmax, element_type=1),
self(x_mesh_buffer, y_mesh_buffer, hmax=hmax,
element_type=element_type_buffer))
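    # Hedged usage sketch: the quarter-circle central mesh comes back as two
    # StructuredGrid2D objects (the inner deformed square and the circular
    # buffer layer); nelem_lat must be even.
    #
    #     inner, buffer = StructuredGrid2D.central_sphere(1.0, nelem_lat=4)
    #     inner.plot()
    #     buffer.plot()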
@classmethod
def rectangle(self, nelem_x, nelem_y, min_x=0., max_x=1., min_y=0.,
max_y=1., hmax=None):
"""
generate a simple rectangular structured grid
"""
raise ValueError('This feature is not included in the free SalvusMesher version.')
@classmethod
def rectangle_vertical_refine(self, nelem_x, nelem_y, min_x=0., max_x=1.,
min_y=0., max_y=1., hmax=None, p1=0.6,
p2=0.8):
raise ValueError('This feature is not included in the free SalvusMesher version.')
@classmethod
def rectangle_vertical_refine_doubling(self, nelem_x, nelem_y, min_x=0.,
max_x=1., min_y=0., max_y=1.,
hmax=None, p1=0.65, p2=0.8):
raise ValueError('This feature is not included in the free SalvusMesher version.')
@classmethod
def rectangle_y(self, y, nelem_x, min_x=0., max_x=1., hmax=None):
"""
generate a simple rectangular structured grid
"""
x = np.linspace(min_x, max_x, nelem_x + 1)
y_mesh, x_mesh = np.meshgrid(y, x)
return self(x_mesh, y_mesh, hmax=hmax, element_type=1)
@classmethod
def cartesian_doubling_layer(self, nelem_x, min_x=0., max_x=1., min_y=0.,
max_y=1., hmax=None, move_nodes=True, p1=0.5,
apply_mask=True, flip_vertical=False):
"""
generate a cartesian structured grid with doubling
        nelem_x is the element number on the inner side
"""
x = np.linspace(min_x, max_x, nelem_x * 2 + 1)
y = np.linspace(min_y, max_y, 3)
y_mesh, x_mesh = np.meshgrid(y, x)
if move_nodes:
y_mesh[2::4, 1] = min_y
y_mesh[1::4, 1] = min_y + (max_y - min_y) * p1
y_mesh[3::4, 1] = min_y + (max_y - min_y) * p1
x_mesh[1::4, 0] += (max_x - min_x) / (nelem_x * 2.)
x_mesh[3::4, 0] -= (max_x - min_x) / (nelem_x * 2.)
# the mask works on element basis, hence the shape is 1 less in each
# dimension than the coordinate arrays
mask = np.zeros((nelem_x * 2, 2), dtype='bool')
if apply_mask:
mask[1::4, 0] = True
mask[2::4, 0] = True
if flip_vertical:
x_mesh = -x_mesh + max_x + min_x
y_mesh = -y_mesh + max_y + min_y
return self(x_mesh, y_mesh, mask, hmax=hmax)
@classmethod
def cartesian_tripling_layer(self, nelem_x, min_x=0., max_x=1., min_y=0.,
max_y=1., hmax=None, flip_vertical=False):
raise ValueError('This feature is not included in the free SalvusMesher version.')
@classmethod
def cartesian_axisem_tripling_layer(self, nelem_x, min_x=0., max_x=1.,
min_y=0., max_y=1., hmax=None,
flip_vertical=False):
"""
generate a cartesian structured grid with tripling such that no element
has a single point on the axis (needed for GLJ quadrature in AxiSEM)
        nelem_x is the element number on the inner side, the number of elements
        on the outer side is nelem_x * 3 - 2
"""
x = np.linspace(min_x, max_x, nelem_x * 3 - 1)
y = np.linspace(min_y, max_y, 3)
y_mesh, x_mesh = np.meshgrid(y, x)
x_bottom = np.linspace(min_x, max_x, nelem_x + 1)
y_mesh[2:-2:3, 1] = min_y
x_mesh[2:-2:3, 1] = x_bottom[1:-1]
        # these points are not used (masked), but we move them as well to
        # ensure they are not present in the unique point set later on.
x_mesh[2:-2:3, 0] = x_bottom[1:-1]
x_mesh[3::3, 0] = x_bottom[1:-1]
x_mesh[1::3, 0] = x_bottom[1:]
# move the central nodes as well to avoid skewed elements
x_center_l = (x_mesh[2:-2:3, 1] + x_mesh[2:-2:3, 2]) / 2.
x_center_1 = x_center_l[:-1] + np.diff(x_center_l) / 3.
x_center_2 = x_center_l[:-1] + np.diff(x_center_l) / 3. * 2.
x_mesh[3:-2:3, 1] = x_center_1
x_mesh[4:-2:3, 1] = x_center_2
# the mask works on element basis, hence the shape is 1 less in each
# dimension than the coordinate arrays
mask = np.zeros((nelem_x * 3 - 2, 2), dtype='bool')
mask[2:-2:3, 0] = True
mask[4:-2:3, 0] = True
mask[1, 0] = True
mask[-2, 0] = True
if flip_vertical:
x_mesh = -x_mesh + max_x + min_x
y_mesh = -y_mesh + max_y + min_y
return self(x_mesh, y_mesh, mask, hmax=hmax)
def add_mask(self, criterion):
"""
add a mask to the structured grid according to the criterion evaluated
at the element centroids.
:param criterion: callback function with the signature criterion(x, y)
which returns an array of bool, True for those elements that should
be masked out.
:type criterion: function
"""
raise ValueError('This feature is not included in the free SalvusMesher version.')
def nelem(self):
return self.mask.size - self.mask.sum()
def npoint(self):
return self.x.shape[0] * self.x.shape[1]
def get_element_centroid(self):
xc = self.x.copy()
xc = (xc[:-1, :] + xc[1:, :]) / 2
xc = (xc[:, :-1] + xc[:, 1:]) / 2
yc = self.y.copy()
yc = (yc[:-1, :] + yc[1:, :]) / 2
yc = (yc[:, :-1] + yc[:, 1:]) / 2
return xc, yc
def get_connectivity(self):
connectivity = connectivity_2D(self.nelem_lat, self.nelem_rad)
connectivity = \
connectivity[np.logical_not(self.mask).T.flatten(), :]
return connectivity
def get_element_type(self):
return self.element_type.T.flatten()[
np.logical_not(self.mask).T.flatten()]
def get_unstructured_mesh(self):
points = np.empty((2, self.npoint()))
points[0, :] = self.x.flatten()
points[1, :] = self.y.flatten()
points, connectivity = compress_points_connectivity(
points.T, self.get_connectivity())
return UnstructuredMesh(points, connectivity)
def plot(self, **kwargs):
"""
        Plot the structured grid with matplotlib.

        Recognized keyword arguments include ``new_figure``, ``cmap``,
        ``linewidths``, ``edgecolor``, ``colorbar``, ``scatter``, ``show`` and
        ``mode`` (one of None, 'max_diag', 'max_edge' or 'element_type').
        ``tolerance`` moves the colorscale slightly such that values at the
        boundary are included.
"""
if kwargs.get('new_figure'):
plt.figure()
plt.axes().set_aspect('equal', 'datalim')
cmap = kwargs.get('cmap')
linewidths = kwargs.get('linewidths', 1.)
mode = kwargs.get('mode')
tolerance = kwargs.get('tolerance', 1e-3)
edgecolor = kwargs.get('edgecolor', 'k')
if mode in ['max_diag', 'max_edge']:
vmin, vmax = 0., 1.25
elif mode == 'element_type':
vmin, vmax = 0., 3.
else:
vmin, vmax = None, None
if mode is None:
data = self.mask
from .cm import cmap_white
cmap = cmap_white
elif mode == 'max_diag':
h1 = ((self.x[1:, 1:] - self.x[:-1, :-1]) ** 2 +
(self.y[1:, 1:] - self.y[:-1, :-1]) ** 2) ** 0.5
h2 = ((self.x[:-1, 1:] - self.x[1:, :-1]) ** 2 +
(self.y[:-1, 1:] - self.y[1:, :-1]) ** 2) ** 0.5
data = np.maximum(h1, h2) / self.hmax / 2 ** 0.5
elif mode == 'max_edge':
h1 = ((self.x[1:, 1:] - self.x[:-1, 1:]) ** 2 +
(self.y[1:, 1:] - self.y[:-1, 1:]) ** 2) ** 0.5
h2 = ((self.x[1:, 1:] - self.x[1:, :-1]) ** 2 +
(self.y[1:, 1:] - self.y[1:, :-1]) ** 2) ** 0.5
h3 = ((self.x[:-1, :-1] - self.x[:-1, 1:]) ** 2 +
(self.y[:-1, :-1] - self.y[:-1, 1:]) ** 2) ** 0.5
h4 = ((self.x[:-1, :-1] - self.x[1:, :-1]) ** 2 +
(self.y[:-1, :-1] - self.y[1:, :-1]) ** 2) ** 0.5
data1 = np.maximum(h1, h2) / self.hmax
data2 = np.maximum(h3, h4) / self.hmax
data = np.maximum(data1, data2)
elif mode == 'element_type':
data = self.element_type
else: # pragma: no cover
raise ValueError('Invalid mode "%s"' % (mode,))
if mode in ['max_edge', 'max_diag']:
vmin += tolerance
vmax += tolerance
if cmap is None:
from .cm import cmap_quality
cmap = cmap_quality
plt.pcolor(self.x, self.y, data, edgecolor=edgecolor, cmap=cmap,
linewidths=linewidths, vmin=vmin, vmax=vmax)
if kwargs.get('colorbar'):
plt.colorbar()
if kwargs.get('scatter'):
plt.scatter(self.x, self.y, s=50, c='r', marker='o')
plt.xlabel('x')
plt.ylabel('y')
if kwargs.get('show'): # pragma: no cover
plt.show()
else:
return plt.gcf()
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/structured_grid_2D.py
| 0.814828 | 0.333693 |
structured_grid_2D.py
|
pypi
|
from __future__ import division, print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import minimize
class LinearSolid(object):
def __init__(self, y_j, w_j, Q, alpha=1., pl_f_ref=1., f_min=None,
f_max=None):
if not len(y_j) == len(w_j): # pragma: no cover
raise ValueError('w_j and y_j not compatible in length')
self.y_j = y_j
self.w_j = w_j
self.N = len(y_j)
self.Q = Q
self.alpha = alpha
self.pl_f_ref = pl_f_ref
self.f_min = f_min
self.f_max = f_max
def get_Q(self, w, exact=False):
return self.q_linear_solid(self.y_j, self.w_j, w, exact=exact)
@staticmethod
def q_linear_solid(y_j, w_j, w, exact=False):
# see van Driel et al 2014, eq 7 and 21
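        # In the notation used here (a sketch of those equations):
        #   Q_ls(w) = (1 + sum_j y_j * w**2 / (w**2 + w_j**2))
        #             / (sum_j y_j * w * w_j / (w**2 + w_j**2))
        # where the numerator sum is only included for exact=True; otherwise
        # the usual approximation Q_ls(w) ~ 1 / sum_j(...) is returned.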
Qls = np.ones_like(w)
if exact:
for _y_j, _w_j in zip(y_j, w_j):
Qls += _y_j * w ** 2 / (w**2 + _w_j**2)
Qls_denom = np.zeros_like(w)
for _y_j, _w_j in zip(y_j, w_j):
Qls_denom += _y_j * w * _w_j / (w**2 + _w_j**2)
return Qls / Qls_denom
@staticmethod
def power_law_Q(Q, alpha, w, pl_f_ref=1.):
return Q * (w / (2 * np.pi * pl_f_ref)) ** alpha
@staticmethod
def optimal_bandwidth(N):
"""
bandwidth that results in about 1% error in fitting Q, see van Driel
(2014), figure 4
"""
if N == 1:
return 0.0
elif N == 2:
return 0.8
elif N == 3:
return 1.7
elif N == 4:
return 2.5
elif N == 5:
return 3.2
elif N == 6:
return 3.9
elif N > 6:
# otherwise use linear extrapolation based on 5 and 6
return (3.9 - 3.2) * (N - 6) + 3.9
else:
raise ValueError('N must be > 0')
@classmethod
def invert_linear_solids(self, Q=1., f_min=0.001, f_max=1., N=3,
nfsamp=100, maxiter=1000, fixfreq=False,
freq_weight=True,
pl_f_ref=1., alpha=0., ftol=1e-10, exact=False):
"""
Parameters:
Q: clear
f_min, fmax: frequency band (in Hz)
N: number of standard linear solids
nfsamp: number of sampling frequencies for computation of the
misfit (log spaced in freqeuncy band)
max_it: number of iterations
Tw: starting temperature for the frequencies
Ty: starting temperature for the amplitudes
d: temperature decay
fixfreq: use log spaced peak frequencies (fixed)
verbose: clear
freq_weight: use frequency weighting to ensure better fit at high
frequencies
w_ref: reference angular frequency for power law Q
alpha: exponent for power law Q
Returns:
w_j: relaxation frequencies, equals 1/tau_sigma in zener
formulation
y_j: coefficients of the linear solids, (Emmerich & Korn, eq
23 and 24)
w: sampling frequencies at which Q(w) is minimized
q_fit: resulting q(w) at these frequencies
chil: error as a function of iteration to check convergence,
Note that this version uses log-l2 norm!
"""
# Set the starting test frequencies equally spaced in log frequency
if (N > 1):
w_j_test = np.logspace(np.log10(f_min), np.log10(f_max), N) * \
2 * np.pi
elif N == 1:
w_j_test = np.array([(f_max * f_min)**.5 * 2 * np.pi])
else: # pragma: no cover
raise ValueError('N needs to be >= 1')
w_j_start = w_j_test.copy()
# Set the sampling frequencies equally spaced in log frequency
w = np.logspace(np.log10(f_min), np.log10(f_max), nfsamp) * 2 * np.pi
# compute target Q from power law
Q_target = self.power_law_Q(Q, alpha, w, pl_f_ref)
# compute weights for linear frequency weighting
if freq_weight:
weights = w / np.sum(w) * nfsamp
else:
weights = np.ones_like(w)
# initial weights y_j based on an empirical guess
y_j_test = np.ones(N) * 6. / N
def l2_error(Q_target, Qls, weights=1):
# see van Driel et al 2014, eq 18
lse = np.sum(np.log(Q_target / Qls) ** 2 * weights)
lse /= len(Qls)
lse = np.sqrt(lse)
return lse
def objective_function(x, N, w, Q_target, weights, w_j_start):
y_j_test = x[:N] / Q
# weight peak frequencies to make them same order of magnitude
w_j_test = x[N:] * w_j_start
q_fit = self.q_linear_solid(y_j=y_j_test, w_j=w_j_test, w=w,
exact=exact)
l2e = l2_error(Q_target=Q_target, Qls=q_fit, weights=weights)
return l2e
args = (N, w, Q_target, weights, w_j_test.copy())
x0 = np.r_[y_j_test, np.ones(N)]
bounds = [(1e-10, 1e10) for l in range(2 * N)]
result = minimize(objective_function, x0, args, jac=False,
bounds=bounds,
options={'maxiter': maxiter,
'maxfun': maxiter * 2 * N, 'disp': False,
'ftol': ftol},
method='L-BFGS-B')
y_j = result.x[:N] / Q
w_j = result.x[N:] * w_j_start
return self(y_j, w_j, Q, alpha, pl_f_ref, f_min, f_max)
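    # Hedged usage sketch: fit three standard linear solids to a constant
    # Q = 100 over 0.01-1 Hz and inspect the resulting fit.
    #
    #     ls = LinearSolid.invert_linear_solids(Q=100., f_min=0.01, f_max=1., N=3)
    #     fig = ls.plot(show=False)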
def plot(self, ffac=10., nfsamp=1000, errorlim=1.1, show=True, color='r',
exact=True, **kwargs):
fig = plt.figure()
f_min = self.f_min / ffac
f_max = self.f_max * ffac
w = np.logspace(np.log10(f_min), np.log10(f_max), nfsamp) * 2 * np.pi
Q_target = self.power_law_Q(self.Q, self.alpha, w, self.pl_f_ref)
Qls = self.get_Q(w, exact=exact)
plt.loglog(w / 2 / np.pi, Q_target, 'k')
plt.loglog(w / 2 / np.pi, Qls, color)
plt.xlim(f_min, f_max)
for _w in self.w_j:
plt.axvline(_w / 2 / np.pi, color=color, ls='--', zorder=10)
plt.loglog(w / 2 / np.pi, Q_target * errorlim, color='gray', ls='--')
plt.loglog(w / 2 / np.pi, Q_target / errorlim, color='gray', ls='--')
plt.axvline(self.f_min, color='gray', ls='-')
plt.axvline(self.f_max, color='gray', ls='-')
plt.xlabel('frequency / Hz')
plt.ylabel('Q')
plt.title('Q approximated with %d linear solids' % (self.N,))
if show: # pragma: no cover
plt.show()
else:
return fig
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/attenuation.py
| 0.897184 | 0.399519 |
attenuation.py
|
pypi
|
from __future__ import division, print_function
import numpy as np
from .helpers import load_lib
lib = load_lib()
def __lexsort_internal_loop(dim, points, loc, segstart, segend):
lib.lexsort_internal_loop(
points.shape[0],
dim,
points.shape[1],
points,
loc,
segstart,
segend,
len(segstart))
def get_global_lexi(points, tolerance_decimals=8):
"""
get global numbering scheme based on lexicographic sorting
    Note that this method does not preserve a previously existing numbering,
    even for points that are already unique.
:param points: points in ndim dimensional space stored in array with shape
(ndim, npoints)
:type points: numpy array
:param tolerance_decimals: accuracy to assume for two points to be equal
after renormalization of coordinates
:type tolerance_decimals: integer
    :returns: tuple of the global indices as a numpy integer array with shape
        (npoint,) and the number of global points
"""
# do not work inplace here:
points = points.copy()
# initialization:
ndim, npoints = points.shape
tolerance = 10. ** (- tolerance_decimals)
seg_bnd = np.zeros(npoints + 1, dtype='bool')
seg_bnd[[0, -1]] = True
segstart = np.array([0])
segend = np.array([npoints])
# find maximum spread in all dimensions
maxmax = np.max([np.ptp(points[dim, :]) for dim in np.arange(ndim)])
# compute absolute tolerances
tolerance *= maxmax
# array to keep track of the reshuffling
loc = np.arange(npoints)
# sort lexicographically in all dimensions, where in higher iterations only
# those points that are the same in lower dimensions (within tolerance) are
# sorted. This is not only faster, but also ensures the functionality of
# the floating point tolerance.
for dim in np.arange(ndim):
# sort in each segment
__lexsort_internal_loop(dim, points, loc, segstart, segend)
if dim < ndim - 1:
# update segments of same points
seg_bnd[1:-1] = np.logical_or(
seg_bnd[1:-1], np.abs(np.diff(points[dim, :])) > tolerance)
segments = np.where(seg_bnd)[0]
# remove length 1 segments, as these don't need sorting
segfilt = np.where(np.diff(segments) > 1)[0]
segstart = segments[:-1][segfilt]
segend = segments[1:][segfilt]
# compute distance between neighbours:
dist_square = ((points[:, 1:] - points[:, :-1]) ** 2).sum(axis=0)
# generate global index
global_index = np.zeros(npoints, dtype='int')
global_index[1:] = (dist_square > tolerance ** 2).cumsum()
# total number of distinct points
nglob = global_index[-1] + 1
# resort index to the original sorting of points
sorted_global_index = np.zeros(npoints, dtype='int')
sorted_global_index[loc] = global_index
return sorted_global_index, nglob
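# Hedged usage sketch (a tiny hand-made example, not a test fixture): the two
# coincident points collapse onto the same global index.
#
#     pts = np.array([[0., 0., 1.],
#                     [0., 0., 2.]])        # shape (ndim=2, npoints=3)
#     idx, nglob = get_global_lexi(pts)     # expected: idx ~ [0, 0, 1], nglob == 2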
def unique_points(points, return_point_ids=False, return_inverse=False,
tolerance_decimals=8):
global_ids, nglobal = get_global_lexi(points.T, tolerance_decimals)
unique_points, unique_point_ids, inverse_idx = np.unique(
global_ids, return_index=True, return_inverse=True)
retval = points[unique_point_ids, :], global_ids
if return_point_ids:
retval += unique_point_ids,
if return_inverse:
retval += inverse_idx,
return retval
def compress_points_connectivity(points, connectivity, return_mask=False,
tolerance_decimals=8):
points, global_ids = unique_points(
points, tolerance_decimals=tolerance_decimals)
connectivity = global_ids[connectivity]
    # remove points that are not in the connectivity
mask = np.zeros(points.shape[0], dtype='bool')
mask[connectivity.ravel()] = True
point_id_map = np.zeros(points.shape[0], dtype='int')
point_id_map[mask] = np.arange(mask.sum())
connectivity = point_id_map[connectivity]
points = points[mask, :]
retval = points, connectivity
if return_mask:
retval += mask,
return retval
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/global_numbering.py
| 0.801354 | 0.526282 |
global_numbering.py
|
pypi
|
from __future__ import division, print_function
from __future__ import absolute_import, unicode_literals
import argparse
import collections
import copy
import io
import itertools
import json
import os
import textwrap
import sys
import yaml
from ..compat import PY2
from ..mesh import run_mesher
from . import _CMD, _GROUPS, _MESH_TYPES
from .validate import validate_inputs
# Those are arguments that do not change the actual mesh and thus are not
# part of the input parameters. They need to be handled explicitly a
# couple of times and thus we just define them once here. kwargs are passed
# on to the .add_argument() method of the ArgumentParser object.
__steering_arguments = {
"output_filename": {"short_name": "o",
"kwargs": {
"type": str,
"default": "$mesh_type$_$model_name$_$period$.e",
"help": "Output filename for the mesh."}
},
"quiet": {"short_name": "q",
"kwargs": {
"action": "store_true",
"default": False,
"help": "Silence output."}},
"overwrite_file": {"kwargs": {
"action": "store_true",
"default": False,
"help": "Overwrite mesh file if it exists."}},
"generate_plots": {"kwargs": {
"action": "store_true",
"default": False,
"help": "Show plots while meshing. WARNING: slow and "
"memory intensive for short periods. In 3D only "
"useful for very small meshes."}}}
# Additional steering arguments used for saving to input files.
__steering_arguments_input_files_save = {
"save_yaml": {"kwargs": {
"type": str,
"metavar": "FILENAME",
"help": "If given, the mesher will not be run, but the "
"chosen parameters will be written to a yaml "
"file."}
},
"save_json": {"kwargs": {
"type": str,
"metavar": "FILENAME",
"help": "If given, the mesher will not be run, but the "
"chosen parameters will be written to a JSON "
"file."}}}
# Additional steering arguments used for reading from input files.
__steering_arguments_input_files_retrieve = {
"input_file": {"kwargs": {
"type": str,
"metavar": "FILENAME",
"help": "Use a YAML or JSON file as an input."
}}}
def __add_steering_args_to_parser(parser, save_inputs):
"""
Add steering parameters to an existing argparse ArgumentParser instance.
    :param parser: The parser object to add the arguments to.
:type save_inputs: bool
:param save_inputs: If True, the arguments to save to an input file will be
added, otherwise those to retrieve will be added.
:return:
"""
if save_inputs:
iterargs = itertools.chain(
__steering_arguments.items(),
__steering_arguments_input_files_save.items())
else:
iterargs = itertools.chain(
__steering_arguments.items(),
__steering_arguments_input_files_retrieve.items())
for name, props in iterargs:
_args = ["--" + name]
if "short_name" in props:
_args.append("-" + props["short_name"])
parser.add_argument(*_args, **props["kwargs"])
def _generate_argument_parser(mesh_type):
"""
    Dynamically generate an argument parser for the given mesh type.
    Essentially generates a separate argument parser for each subcommand.
:type mesh_type: str
:param mesh_type: The chosen high-level mesh type.
"""
mt = _MESH_TYPES[mesh_type]
# Dynamically build up the parser.
parser = argparse.ArgumentParser(
prog=_CMD + " " + mesh_type,
description=mt["description"])
# Add the steering arguments.
__add_steering_args_to_parser(parser, save_inputs=True)
# Autogenerate the rest from the schema file.
for name in mt["all_groups"]:
g = _GROUPS[name]
arg_group = parser.add_argument_group(g["title"], g["description"])
for arg_name, arg_meta in g["arguments"].items():
metavar = ""
nargs = None
action = None
a_type = None
# Resolve the type to an argparse compatible one.
if arg_meta["type"] == "string":
a_type = str
elif arg_meta["type"] == "boolean":
a_type = bool
elif arg_meta["type"] == "integer":
a_type = int
elif arg_meta["type"] == "number":
a_type = float
elif arg_meta["type"] == "array":
# Set nargs if appropriate.
nargs = "+"
if "minItems" in arg_meta and "maxItems" in arg_meta:
if arg_meta["minItems"] == arg_meta["maxItems"]:
nargs = arg_meta["minItems"]
if "type" in arg_meta["items"] and \
arg_meta["items"]["type"] == "string":
a_type = str
# Arrays with same type.
elif "type" in arg_meta["items"]:
if arg_meta["items"]["type"] == "number":
a_type = float
else: # pragma: no cover
raise NotImplementedError
# Arrays with varying type.
elif "anyOf" in arg_meta["items"]:
# Only treat the simple case for now.
t = [_i["type"] for _i in arg_meta["items"]["anyOf"]]
class OptionalType(argparse.Action):
def __call__(self, parser, namespace, values,
option_string=None):
try:
ll = []
for value in values:
if "null" in t and \
value.lower() in ["null", "none"]:
ll.append(None)
continue
if "number" in t:
ll.append(float(value))
except Exception as e: # pragma: no cover
parser.error(str(e))
setattr(namespace, self.dest, ll)
action = OptionalType
else: # pragma: no cover
raise NotImplementedError
else: # pragma: no cover
raise NotImplementedError
arg_group.add_argument(
"--%s.%s" % (name, arg_name), type=a_type,
default=arg_meta["default"], nargs=nargs,
action=action,
help=arg_meta["description"] +
" (default: %s)" % str(arg_meta["default"]),
metavar=metavar)
return parser
def inputs_to_yaml(inputs, output_filename):
"""
Convert the inputs dictionary to a YAML file.
    Assumes ``inputs`` has been validated and everything is in the correct
    order.
    :type inputs: dict
    :param inputs: The input dictionary.
:type output_filename: str
:param output_filename: The output filename.
"""
    # Manually create a YAML file - much easier than forcing comments into
# an existing YAML writer.
yaml = "\n".join([
"# {}".format(_MESH_TYPES[inputs["mesh_type"]]["description"]),
"mesh_type: {}\n\n".format(inputs["mesh_type"])
])
for key, value in inputs.items():
if key == "mesh_type":
continue
yaml += "# {}\n".format(_GROUPS[key]["description"])
yaml += "{}:\n".format(key)
for k, v in value.items():
_meta = _GROUPS[key]["arguments"][k]
indent = " # "
help = _meta["description"]
if "enum" in _meta:
help += " Choices: [%s]" % (", ".join(_meta["enum"]))
yaml += "\n".join(textwrap.wrap(help, initial_indent=indent,
subsequent_indent=indent))
yaml += "\n"
if isinstance(v, str):
yaml += ' {}: "{}"\n'.format(k, v)
else:
yaml += " {}: {}\n".format(
k, str(v).replace("None", "null"))
yaml += "\n"
yaml += "\n"
with io.open(output_filename, "w") as fh:
fh.write(yaml)
def invoke_subcommand(mesh_type, args):
"""
    Invoke the subcommand for the given mesh type.
    :type mesh_type: str
:param mesh_type: The mesh type for which to invoke the command.
:type args: list
:param args: Additional arguments.
"""
# Operate on copies to not modify the original items.
_m_types = copy.deepcopy(_MESH_TYPES)
_g = copy.deepcopy(_GROUPS)
if mesh_type not in _m_types:
print("Mesh type '%s' not valid. Available mesh types: %s" % (
mesh_type, ", ".join(_m_types.keys())))
sys.exit(1)
parser = _generate_argument_parser(mesh_type=mesh_type)
args = parser.parse_args(args=args)
inputs = collections.OrderedDict()
inputs["mesh_type"] = mesh_type
for g in _m_types[mesh_type]["all_groups"]:
inputs[g] = collections.OrderedDict()
for key, value in args._get_kwargs():
# Skip steering arguments.
if key in __steering_arguments or \
key in __steering_arguments_input_files_save:
continue
group_name, key = key.split('.')
inputs[group_name][key] = value
# Make sure it's sorted to produce more consistent YAML and JSON files
# later down the road.
for key, value in inputs.items():
# Skip the mesh_type key - it's already good.
if key == "mesh_type":
continue
new_value = collections.OrderedDict()
for k in list(_g[key]["arguments"].keys()):
new_value[k] = value[k]
inputs[key] = new_value
return inputs, args
def print_help():
"""
Print the help message of the CLI interface.
"""
parser = argparse.ArgumentParser(
prog=_CMD,
description="CLI interface for the Salvus mesher.")
__add_steering_args_to_parser(parser, save_inputs=False)
parser.print_help()
fmt_str = "{:<%i}" % max(len(_i) for _i in _MESH_TYPES)
print("")
print("Available Commands:")
for key, value in _MESH_TYPES.items():
print(" " + fmt_str.format(key) + " " + value["description"])
def main(args=sys.argv[1:]):
"""
Main entry point for the CLI interface.
:type args: list
:param args: The command line parameters.
"""
# handle negative digits in the arguments which could be misinterpreted as
# options otherwise. See http://stackoverflow.com/a/21446783
for i, arg in enumerate(args):
if (arg[0] == '-') and arg[1].isdigit():
args[i] = ' ' + arg
# Show root level help if desired.
if not args or args in [["--help"], ["-h"]]:
print_help()
return
# This branch is executed if an input file is passed.
if [_i for _i in args if _i.strip().startswith("--input_file")]:
parser = argparse.ArgumentParser()
__add_steering_args_to_parser(parser, save_inputs=False)
parsed_args = parser.parse_args(args=args)
assert os.path.exists(parsed_args.input_file), \
"The input files must exist."
# This interestingly enough also works for JSON files.
with open(parsed_args.input_file, "r") as fh:
inputs = yaml.safe_load(fh.read())
# This branch is run if arguments are passed.
else:
# Parse everything to a common dictionary.
inputs, parsed_args = \
invoke_subcommand(mesh_type=args[0], args=args[1:])
# Validate everything.
validate_inputs(inputs, exit_with_pretty_error_msg=True)
# Save to files if either of the two is given.
if hasattr(parsed_args, "save_yaml") and \
(parsed_args.save_yaml or parsed_args.save_json):
if parsed_args.save_yaml:
inputs_to_yaml(inputs, parsed_args.save_yaml)
print("Saved arguments to '%s'. The mesher itself did not run." %
parsed_args.save_yaml)
if parsed_args.save_json:
with io.open(parsed_args.save_json,
mode="wb" if PY2 else "wt") as fh:
json.dump(inputs, fh, indent=4)
print("Saved arguments to '%s'. The mesher itself did not run." %
parsed_args.save_json)
return
# Run mesher and also pass the steering parameters.
run_mesher.run_mesher(
inputs,
output_filename=parsed_args.output_filename,
verbose=not parsed_args.quiet,
overwrite_file=parsed_args.overwrite_file,
generate_plots=parsed_args.generate_plots)
if __name__ == "__main__": # pragma: no cover
main()
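# Added usage sketch (illustrative; "Globe3D" is one of the mesh types named in
# the schema comments and "inputs.yml" is a placeholder file name). The same
# calls can be issued from the shell via `python -m salvus_mesher_lite.interface`.
def _example_cli_calls():  # pragma: no cover
    main([])                                        # print the top-level help
    main(["Globe3D", "--save_yaml", "inputs.yml"])  # only collect and save inputs
    main(["--input_file", "inputs.yml"])            # build a mesh from a saved file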
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/interface/__main__.py
| 0.541166 | 0.171234 |
__main__.py
|
pypi
|
from __future__ import division, print_function
import collections
import json
import os
from jsonschema import Draft4Validator
from ..compat import PY2
# Do a bit of preprocessing by opening the schema, making sure it's actually
# valid, and converting it to something a bit more digestible later on.
# Open the schema.
with open(os.path.join(os.path.dirname(__file__), "schemas",
"salvus_mesher_lite_0.0.1.json"),
mode="rb" if PY2 else "rt") as fh:
# Preserve order.
_SCHEMA = json.load(fh, object_pairs_hook=collections.OrderedDict)
def __resolve_ref(ref):
path = [_i for _i in ref.split("/") if _i != "#"]
p = _SCHEMA
for _p in path:
p = p[_p]
return p
def __resolve_schema(schema):
def _walk_dict(s):
for key, value in s.items():
if isinstance(value, dict):
if list(value.keys()) == ["$ref"]:
s[key] = __resolve_ref(value["$ref"])
else:
_walk_dict(value)
_walk_dict(schema["definitions"])
__resolve_schema(_SCHEMA)
# Validate it.
Draft4Validator.check_schema(_SCHEMA)
# Parse the schema into something that is slightly more digestible.
# Groups are lower level constructs like "spherical", "mesh3D", and so on
# that are later combined to form mesh types.
_GROUPS = {}
for key, value in _SCHEMA["definitions"]["properties"].items():
# groups need to contain at least a description, title and properties
if all(v in value for v in ['description', 'title', 'properties']):
_GROUPS[key] = {
"description": value["description"],
"title": value["title"],
"arguments": value["properties"]
}
# The mesh types are the higher level constructs like Globe3D and
# SphericalChunk3D.
_MESH_TYPES = collections.OrderedDict()
for mt in _SCHEMA["allOf"][0]["properties"]["mesh_type"]["enum"]:
props = [
_i["allOf"][0]
for _i in _SCHEMA["allOf"][1]["anyOf"]
if _i["allOf"][0]["properties"]["mesh_type"]["enum"] == [mt]][0]
_MESH_TYPES[mt] = {
"description": props["properties"]["mesh_type"]["description"],
"required_groups": props["required"],
# The first group is always the mesh type so it can be skipped.
"all_groups": list(props['properties'].keys())[1:]
}
# The command used to call it.
_CMD = "python -m salvus_mesher_lite.interface"
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/interface/__init__.py
| 0.488039 | 0.157979 |
__init__.py
|
pypi
|
from __future__ import absolute_import, division, print_function
import collections
import yaml
from ..interface.validate import validate_inputs
from .cartesian import run_mesher_cartesian
from .spherical import run_mesher_spherical
R_OCEAN_REF = 1.1
def run_mesher(inputs, output_filename=None, verbose=True, overwrite_file=True,
generate_plots=False, write_mesh_to_file=True,
mesh_processing_callback=None, **kwargs):
"""
Convert the input parameters into a mesh.
The job of this function is to turn the input parameters from the higher
level interfaces into an actual mesh by calling the appropriate lower
level functions.
It aims to make the creation of the most common meshes as easy as possible.
:type inputs: dict
:param inputs: The input parameters as defined by the JSONSchema.
:type output_filename: str
:param output_filename: The output filename.
:type verbose: bool
:param verbose: Control verbosity.
:type overwrite_file: bool
:param overwrite_file: Overwrite files if they already exist.
:type generate_plots: bool
:param generate_plots: Show plots while meshing. Slow and potentially
memory intensive and mainly useful for debugging.
"""
if output_filename is None and write_mesh_to_file:
raise RuntimeError('Need a filename to write mesh to file.')
if type(inputs) in [dict, collections.OrderedDict]:
pass
elif type(inputs) is str:
with open(inputs, "r") as fh:
inputs = yaml.safe_load(fh.read())
validate_inputs(inputs, exit_with_pretty_error_msg=True)
else:
raise TypeError('inputs should be either a dict or a filename')
if inputs["mesh_type"] == 'TidalLoading':
raise ValueError('This feature is not included in the free SalvusMesher version.')
elif "spherical" in inputs or 'spherical2D' in inputs:
return run_mesher_spherical(
inputs, output_filename, verbose=verbose,
overwrite_file=overwrite_file, generate_plots=generate_plots,
write_mesh_to_file=write_mesh_to_file,
mesh_processing_callback=mesh_processing_callback, **kwargs)
elif "cartesian2D" in inputs or "cartesian3D" in inputs or \
"cartesian2Daxisem" in inputs:
return run_mesher_cartesian(
inputs, output_filename, verbose=verbose,
overwrite_file=overwrite_file, generate_plots=generate_plots,
write_mesh_to_file=write_mesh_to_file,
mesh_processing_callback=mesh_processing_callback, **kwargs)
else:
raise NotImplementedError
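# Added usage sketch (illustrative; "inputs.yml" and "mesh.e" are placeholder
# file names, not package defaults): run_mesher() accepts either a validated
# inputs dictionary or the path to a YAML file produced by the CLI.
def _example_run_mesher():  # pragma: no cover
    return run_mesher("inputs.yml", output_filename="mesh.e",
                      verbose=False, generate_plots=False)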
|
/salvus_mesher_lite-1.0.6.tar.gz/salvus_mesher_lite-1.0.6/salvus_mesher_lite/mesh/run_mesher.py
| 0.735167 | 0.454472 |
run_mesher.py
|
pypi
|
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
""" Gaussian distribution class for calculating and
visualizing a Gaussian distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats extracted from the data file
"""
def __init__(self, mu=0, sigma=1):
Distribution.__init__(self, mu, sigma)
def calculate_mean(self):
"""Function to calculate the mean of the data set.
Args:
None
Returns:
float: mean of the data set
"""
avg = 1.0 * sum(self.data) / len(self.data)
self.mean = avg
return self.mean
def calculate_stdev(self, sample=True):
"""Function to calculate the standard deviation of the data set.
Args:
sample (bool): whether the data represents a sample or population
Returns:
float: standard deviation of the data set
"""
if sample:
n = len(self.data) - 1
else:
n = len(self.data)
mean = self.calculate_mean()
sigma = 0
for d in self.data:
sigma += (d - mean) ** 2
sigma = math.sqrt(sigma / n)
self.stdev = sigma
return self.stdev
def plot_histogram(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.hist(self.data)
plt.title('Histogram of Data')
plt.xlabel('data')
plt.ylabel('count')
def pdf(self, x):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)
def plot_histogram_pdf(self, n_spaces = 50):
"""Function to plot the normalized histogram of the data and a plot of the
probability density function along the same range
Args:
n_spaces (int): number of data points
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
mu = self.mean
sigma = self.stdev
min_range = min(self.data)
max_range = max(self.data)
# calculates the interval between x values
interval = 1.0 * (max_range - min_range) / n_spaces
x = []
y = []
# calculate the x values to visualize
for i in range(n_spaces):
tmp = min_range + interval*i
x.append(tmp)
y.append(self.pdf(tmp))
# make the plots
fig, axes = plt.subplots(2,sharex=True)
fig.subplots_adjust(hspace=.5)
axes[0].hist(self.data, density=True)
axes[0].set_title('Normed Histogram of Data')
axes[0].set_ylabel('Density')
axes[1].plot(x, y)
axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
axes[1].set_ylabel('Density')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Gaussian distributions
Args:
other (Gaussian): Gaussian instance
Returns:
Gaussian: Gaussian distribution
"""
result = Gaussian()
result.mean = self.mean + other.mean
result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
return result
def __repr__(self):
"""Function to output the characteristics of the Gaussian instance
Args:
None
Returns:
string: characteristics of the Gaussian
"""
return "mean {}, standard deviation {}".format(self.mean, self.stdev)
|
/sam_29082022_distributions-0.1.tar.gz/sam_29082022_distributions-0.1/sam_29082022_distributions/Gaussiandistribution.py
| 0.688364 | 0.853058 |
Gaussiandistribution.py
|
pypi
|
import pandas as pd
import numpy as np
import scipy as sp
import os
import errno
from sklearn.decomposition import PCA
import umap.distances as dist
from sklearn.utils.extmath import svd_flip
from sklearn.utils import check_array, check_random_state
from scipy import sparse
import sklearn.utils.sparsefuncs as sf
from umap.umap_ import nearest_neighbors
__version__ = "0.8.7"
def find_corr_genes(sam, input_gene):
"""Rank genes by their spatially averaged expression pattern correlations to
a desired gene.
Parameters
----------
sam - SAM
The analyzed SAM object
input_gene - string
The gene ID with respect to which correlations will be computed.
Returns
-------
A ranked list of gene IDs based on correlation to the input gene.
"""
all_gene_names = np.array(list(sam.adata.var_names))
D_avg = sam.adata.layers["X_knn_avg"]
input_gene = np.where(all_gene_names == input_gene)[0]
if input_gene.size == 0:
print(
"Gene note found in the filtered dataset. Note "
"that genes are case sensitive."
)
return
pw_corr = generate_correlation_map(D_avg.T.A, D_avg[:, input_gene].T.A)
return all_gene_names[np.argsort(-pw_corr.flatten())]
def _pca_with_sparse(X, npcs, solver='arpack', mu=None, seed=0, mu_axis=0):
random_state = check_random_state(seed)
np.random.set_state(random_state.get_state())
random_init = np.random.rand(np.min(X.shape))
X = check_array(X, accept_sparse=['csr', 'csc'])
if mu is None:
if mu_axis == 0:
mu = X.mean(0).A.flatten()[None, :]
else:
mu = X.mean(1).A.flatten()[:, None]
if mu_axis == 0:
mdot = mu.dot
mmat = mdot
mhdot = mu.T.dot
mhmat = mu.T.dot
Xdot = X.dot
Xmat = Xdot
XHdot = X.T.conj().dot
XHmat = XHdot
ones = np.ones(X.shape[0])[None, :].dot
def matvec(x):
return Xdot(x) - mdot(x)
def matmat(x):
return Xmat(x) - mmat(x)
def rmatvec(x):
return XHdot(x) - mhdot(ones(x))
def rmatmat(x):
return XHmat(x) - mhmat(ones(x))
else:
mdot = mu.dot
mmat = mdot
mhdot = mu.T.dot
mhmat = mu.T.dot
Xdot = X.dot
Xmat = Xdot
XHdot = X.T.conj().dot
XHmat = XHdot
ones = np.ones(X.shape[1])[None, :].dot
def matvec(x):
return Xdot(x) - mdot(ones(x))
def matmat(x):
return Xmat(x) - mmat(ones(x))
def rmatvec(x):
return XHdot(x) - mhdot(x)
def rmatmat(x):
return XHmat(x) - mhmat(x)
XL = sp.sparse.linalg.LinearOperator(
matvec=matvec,
dtype=X.dtype,
matmat=matmat,
shape=X.shape,
rmatvec=rmatvec,
rmatmat=rmatmat,
)
u, s, v = sp.sparse.linalg.svds(XL, solver=solver, k=npcs, v0=random_init)
u, v = svd_flip(u, v)
idx = np.argsort(-s)
v = v[idx, :]
X_pca = (u * s)[:, idx]
ev = s[idx] ** 2 / (X.shape[0] - 1)
total_var = sf.mean_variance_axis(X, axis=0)[1].sum()
ev_ratio = ev / total_var
output = {
'X_pca': X_pca,
'variance': ev,
'variance_ratio': ev_ratio,
'components': v,
}
return output
def nearest_neighbors_wrapper(X,n_neighbors=15,metric='correlation',metric_kwds={},angular=True,random_state=0):
random_state=np.random.RandomState(random_state)
return nearest_neighbors(X,n_neighbors,metric,metric_kwds,angular,random_state)[:2]
def knndist(nnma):
x, y = nnma.nonzero()
data = nnma.data
knn = y.reshape((nnma.shape[0], nnma[0, :].data.size))
val = data.reshape(knn.shape)
return knn, val
def save_figures(filename, fig_IDs=None, **kwargs):
"""
Save figures.
Parameters
----------
filename - str
Name of output file
fig_IDs - int, numpy.array, list, optional, default None
A list of open figure IDs or a figure ID that will be saved to a
pdf/png file respectively.
**kwargs -
Extra keyword arguments passed into 'matplotlib.pyplot.savefig'.
"""
import matplotlib.pyplot as plt
if fig_IDs is not None:
if type(fig_IDs) is list:
savetype = "pdf"
else:
savetype = "png"
else:
savetype = "pdf"
if savetype == "pdf":
from matplotlib.backends.backend_pdf import PdfPages
if len(filename.split(".")) == 1:
filename = filename + ".pdf"
else:
filename = ".".join(filename.split(".")[:-1]) + ".pdf"
pdf = PdfPages(filename)
if fig_IDs is None:
figs = [plt.figure(n) for n in plt.get_fignums()]
else:
figs = [plt.figure(n) for n in fig_IDs]
for fig in figs:
fig.savefig(pdf, format="pdf", **kwargs)
pdf.close()
elif savetype == "png":
plt.figure(fig_IDs).savefig(filename, **kwargs)
def weighted_PCA(mat, do_weight=True, npcs=None, solver="auto",seed = 0):
# mat = (mat - np.mean(mat, axis=0))
if do_weight:
if min(mat.shape) >= 10000 and npcs is None:
print(
"More than 10,000 cells. Running with 'npcs' set to < 1000 is"
" recommended."
)
if npcs is None:
ncom = min(mat.shape)
else:
ncom = min((min(mat.shape), npcs))
pca = PCA(svd_solver=solver, n_components=ncom,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
scaled_eigenvalues = pca.explained_variance_
scaled_eigenvalues = scaled_eigenvalues / scaled_eigenvalues.max()
reduced_weighted = reduced * scaled_eigenvalues[None, :] ** 0.5
else:
pca = PCA(n_components=npcs, svd_solver=solver,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
if reduced.shape[1] == 1:
pca = PCA(n_components=2, svd_solver=solver,random_state=check_random_state(seed))
reduced = pca.fit_transform(mat)
reduced_weighted = reduced
return reduced_weighted, pca
def transform_wPCA(mat, pca):
mat = mat - pca.mean_
reduced = mat.dot(pca.components_.T)
v = pca.explained_variance_ # .var(0)
scaled_eigenvalues = v / v.max()
reduced_weighted = np.array(reduced) * scaled_eigenvalues[None, :] ** 0.5
return reduced_weighted
def search_string(vec, s, case_sensitive=False, invert=False):
vec = np.array(vec)
if isinstance(s,list):
S = s
else:
S = [s]
V=[]; M=[]
for s in S:
m = []
if not case_sensitive:
s = s.lower()
for i in range(len(vec)):
if case_sensitive:
st = vec[i]
else:
st = vec[i].lower()
b = st.find(s)
if not invert and b != -1 or invert and b == -1:
m.append(i)
if len(m) > 0:
V.append(vec[np.array(m)]); M.append(np.array(m))
if len(V)>0:
i = len(V)
if not invert:
V = np.concatenate(V); M = np.concatenate(M);
if i > 1:
ix = np.sort(np.unique(M,return_index=True)[1])
V=V[ix]; M=M[ix];
else:
for i in range(len(V)):
V[i]=list(set(V[i]).intersection(*V))
V = vec[np.in1d(vec,np.unique(np.concatenate(V)))]
M = np.array([np.where(vec==x)[0][0] for x in V])
return V,M
else:
return -1,-1
def distance_matrix_error(dist1, dist2):
s = 0
for k in range(dist1.shape[0]):
s += np.corrcoef(dist1[k, :], dist2[k, :])[0, 1]
return 1 - s / dist1.shape[0]
def generate_euclidean_map(A, B):
a = (A ** 2).sum(1).flatten()
b = (B ** 2).sum(1).flatten()
x = a[:, None] + b[None, :] - 2 * np.dot(A, B.T)
x[x < 0] = 0
return np.sqrt(x)
def generate_correlation_map(x, y):
mu_x = x.mean(1)
mu_y = y.mean(1)
n = x.shape[1]
if n != y.shape[1]:
raise ValueError("x and y must " + "have the same number of timepoints.")
s_x = x.std(1, ddof=n - 1)
s_y = y.std(1, ddof=n - 1)
s_x[s_x == 0] = 1
s_y[s_y == 0] = 1
cov = np.dot(x, y.T) - n * np.dot(mu_x[:, None], mu_y[None, :])
return cov / np.dot(s_x[:, None], s_y[None, :])
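# Added note: generate_correlation_map computes the row-wise Pearson
# correlations between x (m x n) and y (p x n). Passing ddof=n-1 to std() makes
# the divisor n - (n - 1) = 1, so s_x and s_y are root sums of squared
# deviations, which matches the unnormalised covariance above. Illustrative
# self-check against numpy's corrcoef:
def _check_generate_correlation_map():
    rng = np.random.RandomState(0)
    x, y = rng.rand(3, 10), rng.rand(2, 10)
    expected = np.corrcoef(x, y)[:3, 3:]  # cross-block of the joint matrix
    assert np.allclose(generate_correlation_map(x, y), expected)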
def extract_annotation(cn, x, c="_"):
m = []
if x is not None:
for i in range(cn.size):
f = cn[i].split(c)
x = min(len(f) - 1, x)
m.append(f[x])
return np.array(m)
else:
ms = []
ls = []
for i in range(cn.size):
f = cn[i].split(c)
m = []
for x in range(len(f)):
m.append(f[x])
ms.append(m)
ls.append(len(m))
ml = max(ls)
for i in range(len(ms)):
ms[i].extend([""] * (ml - len(ms[i])))
if ml - len(ms[i]) > 0:
ms[i] = np.concatenate(ms[i])
ms = np.vstack(ms)
MS = []
for i in range(ms.shape[1]):
MS.append(ms[:, i])
return MS
def isolate(dt, x1, x2, y1, y2):
return np.where(
np.logical_and(
np.logical_and(dt[:, 0] > x1, dt[:, 0] < x2),
np.logical_and(dt[:, 1] > y1, dt[:, 1] < y2),
)
)[0]
def to_lower(y):
x = y.copy().flatten()
for i in range(x.size):
x[i] = x[i].lower()
return x
def to_upper(y):
x = y.copy().flatten()
for i in range(x.size):
x[i] = x[i].upper()
return x
def create_folder(path):
try:
os.makedirs(path)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
def convert_annotations(A):
x = np.unique(A)
y = np.zeros(A.size)
z = 0
for i in x:
y[A == i] = z
z += 1
return y.astype("int")
def nearest_neighbors_hnsw(x,ef=200,M=48,n_neighbors = 100):
import hnswlib
labels = np.arange(x.shape[0])
p = hnswlib.Index(space = 'cosine', dim = x.shape[1])
p.init_index(max_elements = x.shape[0], ef_construction = ef, M = M)
p.add_items(x, labels)
p.set_ef(ef)
idx, dist = p.knn_query(x, k = n_neighbors)
return idx,dist
def calc_nnm(g_weighted, k, distance=None):
if g_weighted.shape[0] > 0:
if distance == 'cosine':
nnm, dists = nearest_neighbors_hnsw(g_weighted, n_neighbors=k)
else:
nnm, dists = nearest_neighbors_wrapper(g_weighted, n_neighbors=k, metric=distance)
EDM = gen_sparse_knn(nnm, dists)
EDM = EDM.tocsr()
return EDM
def compute_distances(A, dm):
if dm == "euclidean":
m = np.dot(A, A.T)
h = np.diag(m)
x = h[:, None] + h[None, :] - 2 * m
x[x < 0] = 0
dist = np.sqrt(x)
elif dm == "correlation":
dist = 1 - np.corrcoef(A)
else:
dist = sp.spatial.distance.squareform(sp.spatial.distance.pdist(A, metric=dm))
return dist
def dist_to_nn(d, K): # , offset = 0):
E = d.copy()
np.fill_diagonal(E, -1)
M = np.max(E) * 2
x = np.argsort(E, axis=1)[:, :K] # offset:K+offset]
E[
np.tile(
np.arange(E.shape[0]).reshape(E.shape[0], -1), (1, x.shape[1])
).flatten(),
x.flatten(),
] = M
E[E < M] = 0
E[E > 0] = 1
return E # ,x
def to_sparse_knn(D1, k):
for i in range(D1.shape[0]):
x = D1.data[D1.indptr[i] : D1.indptr[i + 1]]
idx = np.argsort(x)
if idx.size > k:
x[idx[:-k]] = 0
D1.data[D1.indptr[i] : D1.indptr[i + 1]] = x
D1.eliminate_zeros()
return D1
def gen_sparse_knn(knni, knnd, shape=None):
if shape is None:
shape = (knni.shape[0], knni.shape[0])
D1 = sp.sparse.lil_matrix(shape)
D1[
np.tile(np.arange(knni.shape[0])[:, None], (1, knni.shape[1])).flatten().astype('int32'),
knni.flatten().astype('int32'),
] = knnd.flatten()
D1 = D1.tocsr()
return D1
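# Added illustration: gen_sparse_knn turns the (indices, distances) pair
# returned by the k-NN helpers above into a sparse adjacency matrix whose row i
# stores the distances from sample i to its k neighbours.
def _example_gen_sparse_knn():
    knni = np.array([[1, 2], [0, 2], [0, 1]])               # neighbour indices
    knnd = np.array([[0.1, 0.4], [0.1, 0.3], [0.4, 0.3]])   # matching distances
    nnm = gen_sparse_knn(knni, knnd)                         # 3x3 CSR matrix
    return nnm.toarray()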
|
/sam-algorithm-1.0.2.tar.gz/sam-algorithm-1.0.2/samalg/utilities.py
| 0.702938 | 0.558989 |
utilities.py
|
pypi
|
import re
from itertools import groupby, islice
# Globals for parsing the CIGAR string
# https://github.com/samtools/hts-specs/blob/da805be01e2ceaaa69fdde9f33c5377bf9ee6369/SAMv1.tex#L383
# operations that consume the reference
_cigar_ref = set(('M', 'D', 'N', '=', 'X', 'EQ'))
# operations that consume the query
_cigar_query = set(('M', 'I', 'S', '=', 'X', 'EQ'))
# operations that do not represent an alignment
_cigar_no_align = set(('H', 'P'))
_valid_cigar = _cigar_ref | _cigar_query | _cigar_no_align
# operations that can be represented as aligned to the reference
_cigar_align = _cigar_ref & _cigar_query
# operations that only consume the reference
_cigar_ref_only = _cigar_ref - _cigar_align
# operations that only consume the query
_cigar_query_only = _cigar_query - _cigar_align
def pairwise_alignment(read, cigar, mdz):
"""
Return the original pairwise alignment for a
read given a sequence, cigar string and md:z tag
Parameters:
read - The sequence for the read
cigar - A cigarplus string
mdz - An MD:Z tag string
"""
seq_pos = 0
mdz_pos = 0
reads = list(read)
expanded_cigar = cigar_expand(cigar)
expanded_mdz = mdz_expand(mdz)
ref = []
seq = []
match_str = []
for _, op in enumerate(expanded_cigar):
if op == 'H':
# For hard masking, we skip over that
continue
elif op == 'M':
if expanded_mdz[mdz_pos]:
ref.append(expanded_mdz[mdz_pos])
match_str.append(':')
else:
ref.append(reads[seq_pos])
match_str.append('|')
seq.append(reads[seq_pos])
seq_pos += 1
mdz_pos += 1
elif op == 'I':
ref.append('-')
seq.append(reads[seq_pos])
match_str.append(' ')
seq_pos += 1
elif op == 'D':
ref.append(expanded_mdz[mdz_pos])
seq.append('-')
match_str.append(' ')
mdz_pos += 1
elif op == 'X':
ref.append(expanded_mdz[mdz_pos])
seq.append(reads[seq_pos])
match_str.append(':')
seq_pos += 1
mdz_pos += 1
elif op == '=':
ref.append(reads[seq_pos])
seq.append(reads[seq_pos])
match_str.append('|')
seq_pos += 1
mdz_pos += 1
elif op == 'N':
ref.append('.')
seq.append('.')
match_str.append(' ')
elif op == 'S':
ref.append('.')
seq.append(reads[seq_pos].lower())
match_str.append(' ')
seq_pos += 1
elif op == 'P':
ref.append('*')
seq.append('*')
match_str.append(' ')
return ''.join(ref), ''.join(match_str), ''.join(seq)
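# Added worked example: a 4 bp read with CIGAR "4M" and MD:Z "2G1" carries one
# mismatch at the third base, so the reconstructed reference shows the original
# base "G" where the read has "T".
def _example_pairwise_alignment():
    ref, match, seq = pairwise_alignment("ACTT", "4M", "2G1")
    assert (ref, match, seq) == ("ACGT", "||:|", "ACTT")
    return ref, match, seq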
def cigar_expand(cigar):
"""
Expand the CIGAR string in to a character map of the
alignment.
eg. 6M3I2M
MMMMMMIIIMM
"""
mapping = []
for c, op in cigar_split(cigar):
mapping.extend([op] * c)
return mapping
def cigar_split(cigar):
"""
Split the CIGAR string in to (num, op) tuples
"""
# https://github.com/brentp/bwa-meth
if cigar == "*":
yield (0, None)
return
cig_iter = groupby(cigar, lambda c: c.isdigit())
for _, n in cig_iter:
op = int("".join(n)), "".join(next(cig_iter)[1])
if op[1] in _valid_cigar:
yield op
else:
raise ValueError("CIGAR operation %s in record %s is invalid." % (op[1], cigar))
def mdz_expand(mdz):
"""
Expands the MD:Z tag in to a character map of the base changes
"""
pairs = mdz_split(mdz)
expanded_mdz = []
for p in pairs:
expanded_mdz.extend([None] * p[0])
expanded_mdz.extend(list(p[1]))
return expanded_mdz
def mdz_split(mdz):
"""
Splits the MD:Z string in to (num, op) tuples
"""
# md_match = re.findall(r"([0-9]+)(\^?[A-Z]+)?", mdz)
md_match = re.findall(r"([0-9]+)\^?([A-Z]+)?", mdz)
pairs = [(int(i), b) for i, b in md_match]
return pairs
def split_every(n, iterable):
"""
Splits an iterable every n objects
eg. split a string every 50 characters
Returns a list of the iterable object pieces
"""
i = iter(iterable)
piece = list(islice(i, n))
while piece:
yield ''.join(piece)
piece = list(islice(i, n))
|
/sam-alignment-reconstructor-0.0.5.tar.gz/sam-alignment-reconstructor-0.0.5/sam_alignment_reconstructor/pairwise.py
| 0.738292 | 0.484807 |
pairwise.py
|
pypi
|
import os
from datetime import datetime, date
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from fbprophet import Prophet
class Detector:
def __init__(
self,
min_time_points: int = 10,
none_zero_ratio: float = 0.0,
min_dataset_size: int = 0,
image_path: str = 'image.png'
) -> None:
self.ds_min_points = min_time_points
self.none_zero_ratio = none_zero_ratio
self.min_dataset_size = min_dataset_size
self.image_path = image_path
self.x_column_name = 'ds'
self.y_column_name = 'y'
def forecast_today(self, dataset: pd.DataFrame) -> pd.DataFrame:
"""
Forecast today's value from the historical dataset and mark today as an anomaly if it falls outside the forecasted range.
The input should be an array of JSON objects having `time` & `value` fields.
The output is an array of JSON objects containing today's forecast & anomaly.
:param dataset:
pd.DataFrame([{"time": "2018-02-13", "value": 1069}, {"time": "2018-02-14", "value": 3000}, ...])
data should be aggregated per day for example there should be only one entry (value) for each day
:return: pd.DataFrame of anomalies
each Series has "ds", "trend", "trend_lower", "trend_upper", "yhat_lower", "yhat_upper", "seasonal",
"seasonal_lower", "seasonal_upper", "seasonalities", "seasonalities_lower", "seasonalities_upper",
"weekly", "weekly_lower", "weekly_upper", "yhat", "std", "actual"
For more info check https://facebook.github.io/prophet/
"""
dataset = self._validate_input(dataset)
historical_data = dataset[:-1]
last_day_of_data = dataset[-1:]
todays_forecast = self._get_forecast(historical_data, last_day_of_data)
return self._compare(historical_data, last_day_of_data, todays_forecast)
def _validate_input(self, dataset: pd.DataFrame) -> pd.DataFrame:
x_column_name = 'time'
y_column_name = 'value'
if x_column_name not in dataset.columns or y_column_name not in dataset.columns:
raise ValueError('dataset should have [{}] & [{}] columns'.format(x_column_name, y_column_name))
dataset = dataset.rename(columns={x_column_name: self.x_column_name, y_column_name: self.y_column_name})
dataset[self.x_column_name] = dataset[self.x_column_name].apply(lambda t: t.strftime('%Y-%m-%d') if isinstance(t, date) else t)
return dataset
def _get_forecast(self, data: pd.DataFrame, actual: pd.DataFrame) -> pd.DataFrame:
actual_time_points = len(data)
actual_dataset_size = data[self.y_column_name].sum()
if actual_time_points < self.ds_min_points or (
len(data[data[self.y_column_name] == 0]) / len(data) > self.none_zero_ratio
) or actual_dataset_size < self.min_dataset_size:
return pd.DataFrame()
historical_data_last_day = datetime.strptime(data[-1:][self.x_column_name].values[0], '%Y-%m-%d')
forecast_day = datetime.strptime(actual[self.x_column_name].values[0], '%Y-%m-%d')
return self._forecast(
data,
actual,
(forecast_day - historical_data_last_day).days
)
def _forecast(self, data: pd.DataFrame, actual: pd.DataFrame, days_to_forecast: int) -> pd.DataFrame:
model = Prophet(daily_seasonality=False, interval_width=0.8)
prophet_input = pd.DataFrame()
prophet_input['ds'] = data[self.x_column_name]
prophet_input['y'] = data[self.y_column_name]
with suppress_stdout_stderr():
model.fit(prophet_input)
future = model.make_future_dataframe(periods=days_to_forecast)
forecast = model.predict(future)
if self._is_anomaly(actual, forecast[-1:]):
fig = plt.figure(facecolor='w', figsize=(10, 6))
ax = fig.add_subplot(111)
fig = model.plot(forecast, ax)
ax.plot(
[datetime.strptime(d, '%Y-%m-%d') for d in actual[self.x_column_name]],
actual[self.y_column_name],
'rx'
)
ax.plot(
[datetime.strptime(d, '%Y-%m-%d') for d in data[self.x_column_name]],
data[self.y_column_name],
'k-'
)
ax.legend(['history', 'prediction', actual[self.x_column_name].values[0]])
fig.savefig(self.image_path)
return forecast[-1:]
def _compare(self, historical_data: pd.DataFrame, actual: pd.DataFrame, forecast: pd.DataFrame) -> pd.DataFrame:
anomaly = pd.DataFrame()
if actual.empty or forecast.empty:
return pd.DataFrame()
if self._is_anomaly(actual, forecast):
anomaly = forecast
anomaly['prediction'] = forecast['yhat'].values[0]
history_mean = historical_data[self.y_column_name].mean()
actual_value = actual[self.y_column_name].values[0]
anomaly['change'] = (actual_value - history_mean) / history_mean
anomaly['std_change'] = (actual_value - history_mean) / np.std(historical_data[self.y_column_name])
anomaly['actual'] = actual_value
anomaly['image_path'] = self.image_path
return anomaly
def _is_anomaly(self, actual, forecast):
actual_value = actual[self.y_column_name].values[0]
return actual_value > forecast['yhat_upper'].values[0] or \
actual_value < forecast['yhat_lower'].values[0]
class suppress_stdout_stderr(object):
"""
A context manager for doing a "deep suppression" of stdout and stderr
"""
def __init__(self):
# Open a pair of null files
self.null_fds = [os.open(os.devnull, os.O_RDWR) for x in range(2)]
# Save the actual stdout (1) and stderr (2) file descriptors.
self.save_fds = (os.dup(1), os.dup(2))
def __enter__(self):
# Assign the null pointers to stdout and stderr.
os.dup2(self.null_fds[0], 1)
os.dup2(self.null_fds[1], 2)
def __exit__(self, *_):
# Re-assign the real stdout/stderr back to (1) and (2)
os.dup2(self.save_fds[0], 1)
os.dup2(self.save_fds[1], 2)
# Close the null files
os.close(self.null_fds[0])
os.close(self.null_fds[1])
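# Added usage sketch (the dates and values are made up for illustration):
# forecast_today expects one aggregated value per day in "time"/"value" columns
# and returns an empty DataFrame when the last day is not anomalous.
def _example_forecast_today():
    history = pd.DataFrame({
        "time": pd.date_range("2018-01-01", periods=30).strftime("%Y-%m-%d"),
        "value": [100] * 29 + [500],
    })
    return Detector(min_time_points=10).forecast_today(history)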
|
/sam_anomaly_detector-2.3.tar.gz/sam_anomaly_detector-2.3/sam_anomaly_detector/detector.py
| 0.701917 | 0.47658 |
detector.py
|
pypi
|
from __future__ import annotations
import dataclasses
from pathlib import Path
from typing import IO, Iterator, List, Optional, Type, Union
from more_itertools import peekable
from xopen import xopen
__all__ = [
"ParsingError",
"SAMFlag",
"SAMHD",
"SAMHeader",
"SAMItem",
"SAMReader",
"SAMSQ",
"read_sam",
]
class ParsingError(Exception):
"""
Parsing error.
"""
def __init__(self, line_number: int):
"""
Parameters
----------
line_number
Line number.
"""
super().__init__(f"Line number {line_number}.")
self._line_number = line_number
@property
def line_number(self) -> int:
"""
Line number.
Returns
-------
Line number.
"""
return self._line_number
@dataclasses.dataclass
class SAMHeader:
"""
SAM header.
Attributes
----------
hd
File-level metadata. Optional. If present, there must be only one
@HD line and it must be the first line of the file.
sq
Reference sequence dictionary. The order of @SQ lines defines the
alignment sorting order.
rg
Read group. Unordered multiple @RG lines are allowed.
"""
hd: Optional[SAMHD] = None
sq: List[SAMSQ] = dataclasses.field(default_factory=lambda: [])
rg: List[str] = dataclasses.field(default_factory=lambda: [])
class SAMFlag:
"""
Bitwise flags.
"""
def __init__(self, flag: int):
self._flag = flag
@property
def value(self) -> int:
return self._flag
@property
def read_paired(self) -> bool:
return self._flag & 0x001 != 0
@property
def read_mapped_in_proper_pair(self) -> bool:
return self._flag & 0x002 != 0
@property
def read_unmapped(self) -> bool:
return self._flag & 0x004 != 0
@property
def mate_unmapped(self) -> bool:
return self._flag & 0x008 != 0
@property
def read_reverse_strand(self) -> bool:
return self._flag & 0x010 != 0
@property
def mate_reverse_strand(self) -> bool:
return self._flag & 0x020 != 0
@property
def first_in_pair(self) -> bool:
return self._flag & 0x040 != 0
@property
def second_in_pair(self) -> bool:
return self._flag & 0x080 != 0
@property
def secondary_alignment(self) -> bool:
return self._flag & 0x100 != 0
@property
def read_fails_filters(self) -> bool:
return self._flag & 0x200 != 0
@property
def read_is_pcr_or_optical_duplicate(self) -> bool:
return self._flag & 0x400 != 0
@property
def supplementary_alignment(self) -> bool:
return self._flag & 0x800 != 0
def __str__(self):
return str(self._flag)
def __repr__(self):
return str(self._flag)
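# Added illustration: decoding a common FLAG value. 99 = 0x1 + 0x2 + 0x20 + 0x40,
# i.e. a paired, properly mapped, first-in-pair read whose mate aligns to the
# reverse strand.
def _example_flag_decoding() -> None:
    flag = SAMFlag(99)
    assert flag.read_paired and flag.read_mapped_in_proper_pair
    assert flag.first_in_pair and flag.mate_reverse_strand
    assert not flag.read_unmapped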
@dataclasses.dataclass
class SAMItem:
"""
SAM item.
Attributes
----------
qname
Query template NAME. Reads/segments having identical QNAME are regarded to come
from the same template. A QNAME `*` indicates the information is unavailable.
flag
Combination of bitwise FLAGs.
rname
Reference sequence name of the alignment.
pos
1-based leftmost mapping position of the first CIGAR operation that "consumes" a
reference base.
mapq
Mapping quality. It equals `−10 log10 Pr{mapping position is wrong}`, rounded to
the nearest integer. A value 255 indicates that the mapping quality is not
available.
cigar
CIGAR string.
rnext
Reference sequence name of the primary alignment of the next read in the
template.
pnext
1-based position of the primary alignment of the next read in the template. Set
as 0 when the information is unavailable.
tlen
Signed observed template length.
seq
Segment sequence. This field can be a `*` when the sequence is not stored.
qual
ASCII of base quality plus 33 (same as the quality string in the Sanger FASTQ
format).
remain
Remaining fields not defined by the SAM format.
References
----------
.. [SAMv1] https://samtools.github.io/hts-specs/SAMv1.pdf
"""
qname: str
flag: SAMFlag
rname: str
pos: int
mapq: str
cigar: str
rnext: str
pnext: str
tlen: str
seq: str
qual: str
remain: List[str]
@classmethod
def parse(cls: Type[SAMItem], line: str, line_number: int) -> SAMItem:
values = line.strip().split("\t")
try:
item = cls(
values[0],
SAMFlag(int(values[1])),
values[2],
int(values[3]),
values[4],
values[5],
values[6],
values[7],
values[8],
values[9],
values[10],
values[11:],
)
except Exception:
raise ParsingError(line_number)
return item
def copy(self) -> SAMItem:
"""
Copy of itself.
Returns
-------
SAM item.
"""
from copy import copy
return copy(self)
@dataclasses.dataclass
class SAMHD:
vn: str
so: Optional[str] = None
@classmethod
def parse(cls: Type[SAMHD], line: str, line_number: int) -> SAMHD:
hd = cls(vn="")
fields = line.strip().split("\t")
try:
assert fields[0] == "@HD"
for f in fields[1:]:
key, val = f.split(":")
if key == "VN":
hd.vn = val
elif key == "SO":
hd.so = val
assert hd.vn != ""
except Exception:
raise ParsingError(line_number)
return hd
@dataclasses.dataclass
class SAMSQ:
sn: str
ln: str
@classmethod
def parse(cls: Type[SAMSQ], line: str, line_number: int) -> SAMSQ:
sq = cls("", "")
fields = line.strip().split("\t")
assert fields[0] == "@SQ"
try:
for f in fields[1:]:
key, val = f.split(":")
if key == "SN":
sq.sn = val
elif key == "LN":
sq.ln = val
assert sq.sn != ""
assert sq.ln != ""
except Exception:
raise ParsingError(line_number)
return sq
class SAMReader:
"""
SAM reader.
"""
def __init__(self, file: Union[str, Path, IO[str]]):
"""
Parameters
----------
file
File path or IO stream.
"""
if isinstance(file, str):
file = Path(file)
if isinstance(file, Path):
file = xopen(file, "r")
self._file = file
self._lines = peekable(line for line in file)
self._line_number = 0
self._header = SAMHeader()
try:
next_line: str = self._lines.peek()
except StopIteration:
return
while next_line.startswith("@"):
line = self._next_line()
if line.startswith("@HD"):
self._header.hd = SAMHD.parse(line, self._line_number)
elif line.startswith("@SQ"):
self._header.sq.append(SAMSQ.parse(line, self._line_number))
try:
next_line = self._lines.peek()
except StopIteration:
break
def read_item(self) -> SAMItem:
"""
Get the next item.
Returns
-------
Next item.
"""
line = self._next_line()
return SAMItem.parse(line, self._line_number)
def read_items(self) -> List[SAMItem]:
"""
Get the list of all items.
Returns
-------
List of all items.
"""
return list(self)
def close(self):
"""
Close the associated stream.
"""
self._file.close()
@property
def header(self) -> SAMHeader:
"""
File header.
Returns
-------
Header.
"""
return self._header
def _next_line(self) -> str:
line = next(self._lines)
self._line_number += 1
return line
def __iter__(self) -> Iterator[SAMItem]:
while True:
try:
yield self.read_item()
except StopIteration:
return
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
del exception_type
del exception_value
del traceback
self.close()
def __str__(self) -> str:
return str(self._header)
def read_sam(file: Union[str, Path, IO[str]]) -> SAMReader:
"""
Open a SAM file for reading.
Parameters
----------
file
File path or IO stream.
Returns
-------
SAM reader.
"""
return SAMReader(file)
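# Added usage sketch ("example.sam" is a placeholder path): iterate over the
# records of a SAM file, which may be gzip-compressed thanks to xopen.
def _example_read_sam(path: str = "example.sam") -> None:
    with read_sam(path) as reader:
        print(reader.header.hd)
        for item in reader:
            print(item.qname, item.flag.value, item.rname, item.pos)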
|
/sam_io-0.0.2-py3-none-any.whl/sam_io/_reader.py
| 0.935854 | 0.420064 |
_reader.py
|
pypi
|
import logging
from collections import defaultdict
from pathlib import Path
from typing import List, Optional, NoReturn, Dict, Set
from pddl.pddl import Domain, Action, Effect
from pyperplan import Parser
from sam_learner.core import PredicatesMatcher, extract_effects, extract_maybe_fluents
from sam_learner.sam_models import GroundedAction, State, TrajectoryComponent, Trajectory, Mode, ComparablePredicate
def contains_duplicates(tested_list: List[str]) -> bool:
"""checks if the list contains duplicates.
:param tested_list: the list to test for duplicates.
:return: whether or not the list contains duplicates.
"""
return len(set(tested_list)) != len(tested_list)
class SAMLearner:
"""Class that represents the safe action model learner algorithm."""
logger: logging.Logger
working_directory_path: Path
trajectories: List[Trajectory]
learned_domain: Domain
matcher: PredicatesMatcher
known_actions: Dict[str, Action]
maybe_effects: Dict[str, Set[ComparablePredicate]]
action_to_triplets_histogram: Dict[str, int]
action_with_duplicate_param_calls: Dict[str, int]
def __init__(
self, working_directory_path: Optional[str] = None, domain_file_name: str = "domain.pddl",
mode: Mode = "production",
domain: Optional[Domain] = None, known_actions: Dict[str, Action] = {}):
self.logger = logging.getLogger(__name__)
self.known_actions = known_actions
self.maybe_effects = defaultdict(set)
self.action_to_triplets_histogram = defaultdict(int)
self.action_with_duplicate_param_calls = defaultdict(int)
if mode == "development":
self.matcher = PredicatesMatcher(domain=domain)
self.learned_domain = domain
return
self.working_directory_path = Path(working_directory_path)
domain_path = self.working_directory_path / domain_file_name
self.learned_domain = Parser(domain_path).parse_domain(read_from_file=True)
self.learned_domain.actions = {}
self.matcher = PredicatesMatcher(domain_path=str(domain_path))
if known_actions is not None:
self.learned_domain.actions = {
name: action for name, action in known_actions.items()
}
def handle_action_effects(
self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> Effect:
"""Finds the effects generated from the previous and the next state on this current step.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param previous_state: the state that the action was executed on.
:param next_state: the state that was created after executing the action on the previous
state.
:return: the effect containing the add and del list of predicates.
"""
grounded_add_effects, grounded_del_effects = extract_effects(previous_state, next_state)
action_effect = Effect()
action_effect.addlist = action_effect.addlist.union(self.matcher.get_possible_literal_matches(
grounded_action, grounded_add_effects))
action_effect.dellist = action_effect.dellist.union(self.matcher.get_possible_literal_matches(
grounded_action, grounded_del_effects))
self.handle_maybe_effects(grounded_action, previous_state, next_state)
return action_effect
def handle_maybe_effects(
self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> NoReturn:
"""Extracts the maybe effects that are caused by the intersection between the previous and the next state.
:param grounded_action: the currently used grounded action.
:param previous_state: the state that the action was executed on.
:param next_state: the state that was created after executing the action on the previous
state.
"""
maybe_effect_fluents = extract_maybe_fluents(previous_state, next_state)
maybe_effects = self.matcher.get_possible_literal_matches(grounded_action, maybe_effect_fluents)
action_name = grounded_action.lifted_action_name
if action_name in self.maybe_effects:
self.maybe_effects[action_name].intersection_update(maybe_effects)
else:
self.maybe_effects[action_name].update(maybe_effects)
def add_new_action(
self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> NoReturn:
"""Create a new action in the domain.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param previous_state: the state that the action was executed on.
:param next_state: the state that was created after executing the action on the previous
state.
"""
self.logger.info(f"Adding the action {grounded_action.activated_action_representation} "
f"to the domain.")
new_action = Action(name=grounded_action.lifted_action_name,
signature=grounded_action.lifted_signature,
precondition=[],
effect=None)
# Add the preconditions; each predicate is grounded at this stage.
possible_preconditions = self.matcher.get_possible_literal_matches(grounded_action,
previous_state.facts)
new_action.precondition = list(set(possible_preconditions))
action_effect = self.handle_action_effects(grounded_action, previous_state, next_state)
new_action.effect = action_effect
self.learned_domain.actions[new_action.name] = new_action
self.logger.debug(
f"Finished adding the action {grounded_action.activated_action_representation}.")
def _is_known_action(self, action_name: str) -> bool:
"""Check whether or not the input action is an action that the agent shouldn't learn.
:param action_name: the name of the action that is currently observed in the trajectory.
:return: whether or not the action is already known to the agent.
"""
self.logger.info(f"Updating the action - {action_name}")
if action_name in self.known_actions:
self.logger.debug(f"The action {action_name} is already known to the agent. Skipping!")
return True
return False
def update_action(
self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> NoReturn:
"""Create a new action in the domain.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param previous_state: the state that the action was executed on.
:param next_state: the state that was created after executing the action on the previous
state.
"""
action_name = grounded_action.lifted_action_name
if self._is_known_action(action_name):
return
current_action: Action = self.learned_domain.actions[action_name]
self._update_action_preconditions(current_action, grounded_action, previous_state)
action_effect: Effect = self.handle_action_effects(
grounded_action, previous_state, next_state)
current_action.effect.addlist = current_action.effect.addlist.union(action_effect.addlist)
current_action.effect.dellist = current_action.effect.dellist.union(action_effect.dellist)
self.logger.debug(f"Done updating the action - {grounded_action.lifted_action_name}")
def _update_action_preconditions(
self, current_action: Action, grounded_action: GroundedAction, previous_state: State) -> NoReturn:
"""Updates the preconditions of an action after it was observed at least once.
:param current_action: the action that is being observed.
:param grounded_action: the grounded action that is being executed in the trajectory component.
:param previous_state: the state that was seen prior to the action's execution.
"""
model_preconditions = current_action.precondition.copy()
possible_preconditions = self.matcher.get_possible_literal_matches(
grounded_action, previous_state.facts)
if len(possible_preconditions) > 0:
for precondition in model_preconditions:
if precondition not in possible_preconditions:
current_action.precondition.remove(precondition)
else:
self.logger.warning(f"while handling the action {grounded_action.activated_action_representation} "
f"inconsistency occurred, since we do not allow for duplicates we do not update the "
f"preconditions.")
def _verify_parameter_duplication(self, grounded_action: GroundedAction) -> bool:
"""Verifies if the action was called with duplicated objects in a trajectory component.
:param grounded_action: the grounded action observed in the trajectory triplet.
:return: whether or not the action contains duplicated parameters.
"""
predicate_objects = [signature_item[0] for signature_item in grounded_action.grounded_signature]
if contains_duplicates(predicate_objects):
self.action_with_duplicate_param_calls[grounded_action.lifted_action_name] += 1
return True
return False
def handle_single_trajectory_component(self, component: TrajectoryComponent) -> NoReturn:
"""Handles a single trajectory component as a part of the learning process.
:param component: the trajectory component that is being handled at the moment.
"""
previous_state = component.previous_state
grounded_action = component.grounded_action
next_state = component.next_state
action_name = grounded_action.lifted_action_name
if self._is_known_action(action_name):
self.logger.debug(f"The action - {action_name} is already known to the agent.")
return
self.action_to_triplets_histogram[action_name] += 1
if self._verify_parameter_duplication(grounded_action):
self.logger.warning(f"{grounded_action.activated_action_representation} contains duplicated parameters! "
f"Not suppoerted in SAM.")
return
if action_name not in self.learned_domain.actions:
self.add_new_action(grounded_action, previous_state, next_state)
else:
self.update_action(grounded_action, previous_state, next_state)
def get_actions_appearance_histogram(self) -> Dict[str, int]:
"""Returns the histogram value of the learned actions.
:return: the histogram of the learned actions.
"""
return self.action_to_triplets_histogram
def get_actions_with_duplicated_parameters_histogram(self) -> Dict[str, int]:
"""Returns the histogram value of the learned actions with the duplicated objects.
:return: the histogram of the learned actions where their usage contained duplicated objects.
"""
return self.action_with_duplicate_param_calls
def learn_action_model(self, trajectories: List[Trajectory]) -> Domain:
"""Learn the SAFE action model from the input trajectories.
:param trajectories: the list of trajectories that are used to learn the safe action model.
:return: a domain containing the actions that were learned.
"""
self.logger.info("Starting to learn the action model!")
for trajectory in trajectories:
for component in trajectory:
self.handle_single_trajectory_component(component)
return self.learned_domain
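# Added usage sketch (the directory layout and file names are assumptions for
# illustration): learn a safe action model from previously parsed trajectories.
def _example_learn_action_model(trajectories: List[Trajectory]) -> Domain:
    learner = SAMLearner(working_directory_path="domains/blocksworld",
                         domain_file_name="domain.pddl")
    learned_domain = learner.learn_action_model(trajectories)
    print(learner.get_actions_appearance_histogram())
    return learned_domain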
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/sam_learner.py
| 0.898039 | 0.389605 |
sam_learner.py
|
pypi
|
from collections import defaultdict
from typing import Optional, Dict, Set, NoReturn, Iterable, List
from pddl.pddl import Action, Domain, Effect
from .core import ExtendedMatcher, extract_effects, LightProxyActionGenerator
from .sam_learner import SAMLearner
from .sam_models import Mode, GroundedAction, State, ComparablePredicate, Trajectory, SignatureType, TrajectoryComponent
def sort_predicates(predicates: Iterable[ComparablePredicate]) -> List[ComparablePredicate]:
"""Sorts the predicate list so that it could be compared to other lists.
:param predicates: the predicates to sort.
:return: the sorted predicate list.
"""
return sorted(
predicates, key=lambda predicate: (predicate.name, str(predicate.signature)))
class ESAMLearner(SAMLearner):
"""Extension to the SAM Learning algorithm."""
matcher: ExtendedMatcher
proxy_action_generator: LightProxyActionGenerator
# Maps an action name to a dictionary of effect predicate names to their possible bindings.
add_effect_cnfs: Dict[str, Dict[str, Set[ComparablePredicate]]]
delete_effect_cnfs: Dict[str, Dict[str, Set[ComparablePredicate]]]
# Maps an action name to a dictionary of preconditions predicate names to their possible bindings.
actions_possible_preconditions: Dict[str, Set[ComparablePredicate]]
learned_actions_signatures: Dict[str, SignatureType]
def __init__(
self, working_directory_path: Optional[str] = None, domain_file_name: str = "domain.pddl",
mode: Mode = "production", domain: Optional[Domain] = None, known_actions: Dict[str, Action] = {}):
super().__init__(working_directory_path, domain_file_name, mode, domain, known_actions)
self.proxy_action_generator = LightProxyActionGenerator()
self.add_effect_cnfs = {}
self.delete_effect_cnfs = {}
self.actions_possible_preconditions = {}
self.learned_actions_signatures = {}
if mode == "development":
self.matcher = ExtendedMatcher(domain=domain)
return
domain_path = self.working_directory_path / domain_file_name
self.matcher = ExtendedMatcher(domain_path=str(domain_path))
def _add_predicates_cnfs(
self, predicates: List[ComparablePredicate]) -> Dict[str, Set[ComparablePredicate]]:
"""Adds fluents to a CNF clause when needed.
:param predicates: the predicates that have been currently observed.
:return: the dictionary of the fluents after the new information was added.
"""
predicates_cnf = defaultdict(set)
for predicate in predicates:
for index, signature_item in enumerate(predicate.signature):
if type(signature_item[1]) is tuple:
continue
predicate.signature[index] = (signature_item[0], (signature_item[1],))
predicates_cnf[predicate.name].add(predicate)
return predicates_cnf
def _update_preconditions(self, action_name: str,
possible_preconditions: List[ComparablePredicate]) -> NoReturn:
"""Update the precondition for an action that had already been observed.
:param action_name: the name of the action that is currently being learned.
:param possible_preconditions: the fluents that were observed in the current trajectory triplet.
"""
stored_preconditions = self.actions_possible_preconditions[action_name]
self.logger.debug("Removing predicates that don't exist in the current trajectory triplet.")
remaining_preconditions = [predicate for predicate in possible_preconditions if
predicate in stored_preconditions]
self.actions_possible_preconditions[action_name] = set(remaining_preconditions)
def _update_effects_cnfs(self, new_lifted_effects: List[ComparablePredicate],
effects_to_update: Dict[str, Set[ComparablePredicate]]) -> NoReturn:
"""Update the CNF clauses of an already observed action.
:param new_lifted_effects: the newly observed lifted effects for the action.
:param effects_to_update: the current CNF clauses that the action has.
"""
new_effects_cnfs = self._add_predicates_cnfs(new_lifted_effects)
not_encountered_cnfs = {}
for predicate_name, new_possible_effect_cnfs in new_effects_cnfs.items():
if predicate_name not in effects_to_update:
self.logger.debug("Adding a new effect that hadn't been encountered before.")
not_encountered_cnfs[predicate_name] = new_possible_effect_cnfs
else:
self.logger.debug("Removing redundant CNF clauses from the effect clauses of the predicate.")
previous_effects = effects_to_update[predicate_name]
effects_to_update[predicate_name] = new_possible_effect_cnfs.intersection(previous_effects)
effects_to_update.update(not_encountered_cnfs)
def add_new_action(
self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> NoReturn:
"""Learns the model of an action that was observed for the first time.
:param grounded_action: the grounded action that was observed in the trajectory triplet.
:param previous_state: the state that was observed prior to the action's execution.
:param next_state: the state that was observed after the action's execution.
"""
action_name = grounded_action.lifted_action_name
self.logger.info(f"Action {action_name} encountered for the first time! Adding its data to the data structure.")
action_signature = grounded_action.lifted_signature
self.learned_actions_signatures[action_name] = action_signature
possible_preconditions = self.matcher.get_possible_literal_matches(grounded_action,
previous_state.facts)
self.actions_possible_preconditions[action_name] = set(possible_preconditions)
grounded_add_effects, grounded_del_effects = extract_effects(previous_state, next_state)
lifted_add_effects = self.matcher.get_possible_literal_matches(grounded_action, grounded_add_effects)
self.add_effect_cnfs[action_name] = self._add_predicates_cnfs(lifted_add_effects)
lifted_del_effects = self.matcher.get_possible_literal_matches(grounded_action, grounded_del_effects)
self.delete_effect_cnfs[action_name] = self._add_predicates_cnfs(lifted_del_effects)
self.logger.debug(f"Finished adding {action_name} information to the data structure")
def update_action(self, grounded_action: GroundedAction, previous_state: State, next_state: State) -> NoReturn:
"""Updates an action that was observed at least once already.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param previous_state: the state that the action was executed on.
:param next_state: the state that was created after executing the action on the previous
state.
"""
action_name = grounded_action.lifted_action_name
self.logger.info(f"Starting to update the action - {action_name}")
observed_pre_state_predicates = self.matcher.get_possible_literal_matches(grounded_action, previous_state.facts)
self._update_preconditions(action_name, observed_pre_state_predicates)
grounded_add_effects, grounded_del_effects = extract_effects(previous_state, next_state)
new_lifted_add_effects = self.matcher.get_possible_literal_matches(grounded_action, grounded_add_effects)
new_lifted_delete_effects = self.matcher.get_possible_literal_matches(grounded_action, grounded_del_effects)
self._update_effects_cnfs(new_lifted_add_effects, self.add_effect_cnfs[action_name])
self._update_effects_cnfs(new_lifted_delete_effects, self.delete_effect_cnfs[action_name])
self.logger.debug(f"Done updating the action - {action_name}")
def _is_proxy_action(self, action_name: str) -> bool:
"""Validate whether or not an action is supposed to be a proxy action due to the fact that it has ambiguous
effects.
:param action_name: the name of the action that is currently being tested.
:return: whether or not an action is a proxy action.
"""
action_add_effects = self.add_effect_cnfs[action_name]
action_del_effects = self.delete_effect_cnfs[action_name]
return any([len(cnf) > 1 for cnf in action_add_effects.values()]) or \
any([len(cnf) > 1 for cnf in action_del_effects.values()])
def create_proxy_actions(self) -> Dict[str, Action]:
"""Create the proxy actions for the cases where there is ambiguity in the learning process.
:return: the actions that the model learned through its execution stage.
"""
learned_actions = {}
for action_name in self.actions_possible_preconditions:
if not self._is_proxy_action(action_name):
self.logger.debug(f"Action - {action_name} has no ambiguities, creating regular action.")
action = self._create_action_from_cnf(action_name)
learned_actions[action_name] = action
else:
# In the light version of the proxy action generator we don't have to remove the constants.
self.logger.debug(f"Creating proxy actions for the action - {action_name}")
proxy_actions = self.proxy_action_generator.create_proxy_actions(
action_name=action_name,
action_signature=self.learned_actions_signatures[action_name],
surely_preconditions=self.actions_possible_preconditions[action_name],
add_effect_cnfs=self.add_effect_cnfs[action_name],
delete_effect_cnfs=self.delete_effect_cnfs[action_name]
)
for action in proxy_actions:
learned_actions[action.name] = action
return learned_actions
def _create_action_from_cnf(self, action_name: str) -> Action:
"""Create the action object from the CNF clauses collected through the algorithm's execution.
:param action_name: the name of the action that is currently being created.
:return: the action that was created from the CNF clauses (not proxy action).
"""
action_add_effect_cnf = self.add_effect_cnfs[action_name]
action_delete_effect_cnf = self.delete_effect_cnfs[action_name]
add_effects = set()
delete_effects = set()
for add_fluents in action_add_effect_cnf.values():
add_effects.update(add_fluents)
for delete_fluents in action_delete_effect_cnf.values():
delete_effects.update(delete_fluents)
effect = Effect()
effect.addlist = add_effects
effect.dellist = delete_effects
return Action(name=action_name, signature=self.learned_actions_signatures[action_name],
precondition=list(self.actions_possible_preconditions[action_name]), effect=effect)
def handle_single_trajectory_component(self, component: TrajectoryComponent) -> NoReturn:
"""Handles a single trajectory component as a part of the learning process.
:param component: the trajectory component that is being handled at the moment.
"""
previous_state = component.previous_state
grounded_action = component.grounded_action
next_state = component.next_state
action_name = grounded_action.lifted_action_name
if self._is_known_action(action_name):
self.logger.debug(f"The action - {action_name} is already known to the agent.")
return
self._verify_parameter_duplication(grounded_action)
self.action_to_triplets_histogram[action_name] += 1
if action_name not in self.learned_actions_signatures:
self.add_new_action(grounded_action, previous_state, next_state)
else:
self.update_action(grounded_action, previous_state, next_state)
def learn_action_model(self, trajectories: List[Trajectory]) -> Domain:
"""Learn the SAFE action model from the input trajectories.
:param trajectories: the list of trajectories that are used to learn the safe action model.
:return: a domain containing the actions that were learned.
"""
self.logger.info("Starting to learn the action model!")
for trajectory in trajectories:
for component in trajectory:
self.handle_single_trajectory_component(component)
learned_actions = self.create_proxy_actions()
learned_actions.update(self.known_actions)
self.learned_domain.actions = learned_actions
return self.learned_domain
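# --- Illustrative usage sketch (added for exposition; not part of the original module) ---
# The learner class name and constructor arguments below are assumptions for illustration
# only; `learn_action_model` and `DomainExporter.export_domain` are taken from this package.
#
#   from pathlib import Path
#   from sam_learner.core.domain_export import DomainExporter
#
#   learner = ExtendedSAMLearner(working_directory_path=Path("experiments"))  # hypothetical name/ctor
#   learned_domain = learner.learn_action_model(trajectories)
#   DomainExporter().export_domain(learned_domain, Path("learned_domain.pddl"))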
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/extended_sam_learner.py
| 0.956977 | 0.329365 |
extended_sam_learner.py
|
pypi
|
from collections import defaultdict
from pathlib import Path
from typing import NoReturn, List, Dict
from pddl.pddl import Domain, Action, Predicate, Type
class DomainExporter:
"""Class that is able to export a domain to a correct PDDL file."""
def write_action_preconditions(self, predicates: List[Predicate]) -> str:
"""Writes the predicates of an action's precondition according to the PDDL file format.
        :param predicates: the predicates that form the action's precondition.
        :return: the formatted string representing the action's precondition.
"""
formatted_preconditions = "{content}"
if len(predicates) > 1:
formatted_preconditions = "(and {content})"
action_predicates = self._write_positive_predicates(predicates)
return formatted_preconditions.format(content=" ".join(action_predicates))
@staticmethod
def _write_positive_predicates(predicates: List[Predicate]) -> List[str]:
"""writes positive predicates according to the format of a PDDL file.
:param predicates: the preconditions / effects of an action.
:return: the formatted positive predicates.
"""
action_predicates = []
for predicate in predicates:
predicate_formatted_signature = " ".join([f"{name}" for name, _ in predicate.signature])
predicate_str = f"({predicate.name} {predicate_formatted_signature})"
action_predicates.append(predicate_str)
return action_predicates
@staticmethod
def _write_negative_predicates(predicates: List[Predicate]) -> List[str]:
"""writes negative predicates according to the format of a PDDL file.
:param predicates: the preconditions / effects of an action.
:return: the formatted negative predicates.
"""
action_predicates = []
for predicate in predicates:
predicate_formatted_signature = " ".join([f"{name}" for name, _ in predicate.signature])
predicate_str = f"(not ({predicate.name} {predicate_formatted_signature}))"
action_predicates.append(predicate_str)
return action_predicates
def write_action_effects(self, add_effects: List[Predicate], delete_effects: List[Predicate]) -> str:
"""Write the effects of an action according to the PDDL file format.
:param add_effects: the add effects of an action.
:param delete_effects: the delete effects of an action.
:return: the formatted string representing the action's effects.
"""
action_effect_predicates = self._write_positive_predicates(add_effects)
action_effect_predicates += self._write_negative_predicates(delete_effects)
formatted_effects = "{content}"
if len(action_effect_predicates) > 1:
formatted_effects = "(and {content})"
return formatted_effects.format(content=" ".join(action_effect_predicates))
def write_action(self, action: Action) -> str:
"""Write the action formatted string from the action data.
:param action: The action that needs to be formatted into a string.
:return: the string format of the action.
"""
action_params = " ".join([f"{name} - {types[0]}" for name, types in action.signature])
action_preconds = self.write_action_preconditions(action.precondition)
action_effects = self.write_action_effects(action.effect.addlist, action.effect.dellist)
return f"(:action {action.name}\n" \
f"\t:parameters ({action_params})\n" \
f"\t:precondition {action_preconds}\n" \
f"\t:effect {action_effects})\n" \
f"\n"
@staticmethod
def write_predicates(predicates: Dict[str, Predicate]) -> str:
"""Writes the predicates formatted according to the domain file format.
:param predicates: the predicates that are in the domain's definition.
:return: the formatted string representing the domain's predicates.
"""
predicates_str = "(:predicates\n{predicates})\n\n"
predicates_strings = []
for predicate_name, predicate in predicates.items():
predicate_params = " ".join(
[f"{name} - {types[0]}" for name, types in predicate.signature])
predicates_strings.append(f"\t({predicate_name} {predicate_params})")
return predicates_str.format(predicates="\n".join(predicates_strings))
def write_constants(self, constants: Dict[str, Type]) -> str:
"""Writes the constants of the domain to the new domain file.
:param constants: the constants that appear in the domain object.
:return: the representation of the constants as a canonical PDDL string.
"""
constants_str = "(:constants\n{constants})\n\n"
sorted_consts_types = defaultdict(list)
for constant_name, constant_type in constants.items():
sorted_consts_types[constant_type.name].append(constant_name)
constants_content = []
for pddl_type_name, sub_types in sorted_consts_types.items():
type_like_object_pddl_str = "\t"
type_like_object_pddl_str += " ".join([child_type for child_type in sub_types])
type_like_object_pddl_str += f"\t- {pddl_type_name}"
constants_content.append(type_like_object_pddl_str)
return constants_str.format(constants="\n".join(constants_content))
@staticmethod
def format_type_like_string(sorted_type_like_objects: Dict[str, List[str]]) -> List[str]:
"""formats the string that are of the same format as types. This applies to both consts and types.
:param sorted_type_like_objects: the type like objects that are being formatted into a list of strings.
:return: the formatted strings as a list.
"""
type_like_object_content = []
for pddl_type_name, sub_types in sorted_type_like_objects.items():
type_like_object_pddl_str = "\t"
type_like_object_pddl_str += "\n\t".join([child_type for child_type in sub_types[:-1]])
type_like_object_pddl_str += f"\n\t{sub_types[-1]} - {pddl_type_name}"
type_like_object_content.append(type_like_object_pddl_str)
return type_like_object_content
def write_types(self, types: Dict[str, Type]) -> str:
"""Writes the definitions of the types according to the PDDL file format.
:param types: the types that are available in the learned domain.
:return: the formatted string representing the types in the PDDL domain file.
"""
types_str = "(:types\n{types_content})\n\n"
sorted_types = defaultdict(list)
for type_name, pddl_type in types.items():
if pddl_type.parent is not None:
sorted_types[pddl_type.parent.name].append(type_name)
else:
continue
types_content = self.format_type_like_string(sorted_types)
return types_str.format(types_content="\n".join(types_content))
def export_domain(self, domain: Domain, export_path: Path) -> NoReturn:
"""Export the domain object to a correct PDDL file.
:param domain: the learned domain object.
:param export_path: the path to the file that the domain would be exported to.
"""
domain_types = self.write_types(domain.types)
domain_consts = self.write_constants(domain.constants) if len(domain.constants) > 0 else ""
domain_headers = f"(define (domain {domain.name})\n" \
"(:requirements :strips :typing)\n\n" \
f"{domain_types}\n" \
f"{domain_consts}" \
"{domain_content})"
domain_content = self.write_predicates(domain.predicates)
for action in domain.actions.values():
num_preconditions = len(action.precondition)
num_effects = len(action.effect.addlist) + len(action.effect.dellist)
if num_preconditions == 0 or num_effects == 0:
# will not write an action that is missing its preconditions or effects.
continue
domain_content += self.write_action(action)
with open(export_path, "wt") as export_domain_file:
export_domain_file.write(domain_headers.format(domain_content=domain_content))
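# Illustrative usage sketch (not part of the original module): parse an existing domain with
# pyperplan's Parser (as done elsewhere in this package) and write it back out with the
# exporter. The command-line paths are placeholders.
if __name__ == "__main__":
    import sys
    from pddl.parser import Parser
    parsed_domain = Parser(sys.argv[1]).parse_domain(read_from_file=True)
    DomainExporter().export_domain(parsed_domain, Path(sys.argv[2]))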
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/domain_export.py
| 0.904732 | 0.475605 |
domain_export.py
|
pypi
|
import logging
import os
import pickle
from pathlib import Path
from typing import Tuple, List, NoReturn, Optional
from pddl.pddl import Domain
from sam_learner.core import TrajectoryGenerator
from sam_learner.sam_models import Trajectory
class TrajectorySerializationManager:
"""class that manages the serialization processes of the trajectories."""
working_directory_path: Path
logger: logging.Logger
domain_path: Path
def __init__(self, workdir_path: Path, domain_path: Path):
self.working_directory_path = workdir_path
self.domain_path = domain_path
self.logger = logging.getLogger(__name__)
def get_problem_and_solution_files(self) -> List[Tuple[Path, Path]]:
"""Get the problem and the solution file paths from the working directory.
:return: the paths to the problems and their respected plans.
"""
paths = []
for solution_file_path in self.working_directory_path.glob("*.solution"):
problem_file_name = solution_file_path.stem.split("_plan")[0]
problem_path = self.working_directory_path / f"{problem_file_name}.pddl"
paths.append((problem_path, solution_file_path))
return paths
def create_trajectories(self, serialization_file_name: Optional[str] = None) -> List[Trajectory]:
"""Create the trajectories that will be used in the main algorithm.
        :param serialization_file_name: the optional name of the file that caches the serialized trajectories.
        :return: the list of trajectories that will be used in the SAM algorithm.
"""
self.logger.info("Creating the trajectories for the algorithm.")
trajectories = []
if serialization_file_name is not None:
stored_trajectories_path = self.working_directory_path / serialization_file_name
else:
stored_trajectories_path = self.working_directory_path / "saved_trajectories"
if stored_trajectories_path.exists():
return self.load_trajectories(stored_trajectories_path)
for problem_path, plan_path in self.get_problem_and_solution_files():
generator = TrajectoryGenerator(str(self.domain_path), str(problem_path))
trajectories.append(generator.generate_trajectory(str(plan_path)))
self.store_trajectories(stored_trajectories_path, trajectories)
return trajectories
def create_trajectories_fama_format(self) -> Tuple[List[Path], List[Trajectory]]:
"""Create the trajectories in the files that FAMA learner can use in the learning process.
:return: the list of paths to the trajectories.
"""
self.logger.info("Creating the trajectories for the FAMA algorithm in the correct format.")
trajectory_paths = []
generated_trajectories = []
for index, (problem_path, plan_path) in enumerate(self.get_problem_and_solution_files()):
generator = TrajectoryGenerator(str(self.domain_path), str(problem_path))
fama_trajectory, generated_trajectory = generator.create_trajectory_in_fama_format(plan_path)
trajectory_file_path = self.working_directory_path / f"{self.domain_path.stem}_trajectory{index}"
trajectory_paths.append(trajectory_file_path)
generated_trajectories.append(generated_trajectory)
with open(trajectory_file_path, 'w') as output:
output.write(fama_trajectory)
return trajectory_paths, generated_trajectories
def load_trajectories(self, stored_trajectories_path: Path) -> List[Trajectory]:
"""Loads the trajectories from the trajectories file.
:param stored_trajectories_path: the path to the files that stores the trajectories.
:return: the loaded deserialized trajectories.
"""
self.logger.debug("Loading the trajectories from the file!")
with open(stored_trajectories_path, "rb") as trajectories_file:
return pickle.load(trajectories_file)
def store_trajectories(self, stored_trajectories_path: Path, trajectories: List[Trajectory]) -> NoReturn:
"""Store the trajectories in the trajectory file so that future runs of the algorithm would be faster.
:param stored_trajectories_path: the path to the file that stores the trajectories.
:param trajectories: the trajectories that are to be stored in the file.
"""
with open(stored_trajectories_path, "wb") as trajectories_file:
self.logger.debug("Saving the created trajectories in a file for future usage.")
pickle.dump(trajectories, trajectories_file)
def update_stored_trajectories(self, trajectory: Trajectory, save_path: Optional[Path] = None) -> NoReturn:
"""Serialize the new trajectory.
:param trajectory: the trajectory to serialize.
:param save_path: the path to the file that saves the trajectories.
"""
trajectories_path = self.working_directory_path / "saved_trajectories" if save_path is None else save_path
with open(trajectories_path, "rb") as trajectories_file:
trajectories = pickle.load(trajectories_file)
trajectories.append(trajectory)
self.store_trajectories(trajectories_path, trajectories)
def delete_trajectories_file(self, trajectories_file_path: Optional[Path] = None) -> NoReturn:
"""deletes the file that contains the saved trajectories.
:param trajectories_file_path: the path to the trajectories file.
"""
trajectories_path = self.working_directory_path / "saved_trajectories" if \
trajectories_file_path is None else trajectories_file_path
os.remove(trajectories_path)
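# Illustrative usage sketch (not part of the original module): build (or load cached)
# trajectories for every <problem>.pddl / <problem>_plan*.solution pair in a working
# directory. The directory and domain paths are placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    manager = TrajectorySerializationManager(
        workdir_path=Path("experiments/blocksworld"),
        domain_path=Path("experiments/blocksworld/domain.pddl"))
    cached_trajectories = manager.create_trajectories()
    print(f"Created {len(cached_trajectories)} trajectories.")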
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/trajectories_manager.py
| 0.879755 | 0.288892 |
trajectories_manager.py
|
pypi
|
import logging
import random
import sys
from pathlib import Path
import grounding
from pddl.parser import Parser
from pddl.pddl import Domain, Problem
from typing import NoReturn, List
from task import Task
from sam_learner.sam_models.state import State
class RandomWalkPlansGenerator:
"""Class that generates plans for a domain and a problem using random walk algorithm.
Attributes:
logger: the logger of the class.
domain: the PDDL domain data.
problem: the PDDL problem data.
"""
logger: logging.Logger
domain: Domain
problem: Problem
problem_path: Path
output_directory_path: Path
def __init__(self, domain_path: str, problem_path: str, output_directory_path: str):
parser = Parser(domain_path, problem_path)
self.logger = logging.getLogger(__name__)
self.domain = parser.parse_domain(read_from_file=True)
self.problem = parser.parse_problem(dom=self.domain, read_from_file=True)
self.problem_path = Path(problem_path)
self.output_directory_path = Path(output_directory_path)
def generate_single_plan(self, task: Task, max_plan_steps: int) -> List[str]:
"""Generate a plan from a randomly selected applicable actions (the random walk).
:param task: the task that generates the possible transitions from the initial state.
:param max_plan_steps: the maximal length of the generated plans.
:return: a list containing the action sequences that were generated.
"""
if max_plan_steps <= 0:
raise ValueError("Given illegal value of steps for the plan!")
self.logger.info(f"Starting to generate a plan for the domain - {self.domain.name} and "
f"the problem - {self.problem.name}")
actions_sequence = []
current_state = State(self.problem.initial_state, self.domain).ground_facts()
num_steps = 0
while num_steps < max_plan_steps:
possible_transitions = task.get_successor_states(current_state)
if len(possible_transitions) == 0:
return actions_sequence
action, state = random.choice(possible_transitions)
self.logger.debug(f"generated the applicable action {action}")
actions_sequence.append(f"{action.name}\n")
current_state = state
num_steps += 1
actions_sequence[-1] = actions_sequence[-1].strip("\n")
return actions_sequence
def generate_plans(self, max_plan_steps: int, num_plans: int) -> List[List[str]]:
"""Generate plans with maximal length given as input.
:param max_plan_steps: the maximal length of the output plans.
:param num_plans: the number of plans to generate.
        :return: the plans as action sequences.
"""
grounded_planning_task: Task = grounding.ground(problem=self.problem)
plans = []
while len(plans) < num_plans:
plan = self.generate_single_plan(grounded_planning_task, max_plan_steps)
if len(plan) == 0:
continue
plans.append(plan)
return plans
def export_plans(self, generated_plans: List[List[str]], plan_length: int) -> NoReturn:
"""Export the plans to plan file according to the correct format.
:param generated_plans: the plans that were generated using random walk.
:param plan_length: the length of the generated plans.
"""
for index, plan in enumerate(generated_plans, 1):
with open(f"{str(self.output_directory_path / self.problem_path.stem)}_plan_"
f"{index}_max_len_{plan_length}.solution", "wt") as plan_file:
plan_file.writelines(plan)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
directory_path = sys.argv[1]
domain_path = sys.argv[2]
problem_files_glob = sys.argv[3]
for file_path in Path(directory_path).glob(f"{problem_files_glob}*.pddl"):
print(f"working on - {file_path}")
gen = RandomWalkPlansGenerator(domain_path,
file_path,
directory_path)
plans = gen.generate_plans(max_plan_steps=30, num_plans=5)
gen.export_plans(generated_plans=plans, plan_length=30)
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/random_walk_plan_generator.py
| 0.698844 | 0.487246 |
random_walk_plan_generator.py
|
pypi
|
import copy
import logging
from typing import NoReturn, List, Tuple, Optional
from pddl.parser import Parser
from pddl.pddl import Type, Domain
from sam_learner.sam_models import SignatureType
from sam_learner.sam_models.comparable_predicate import ComparablePredicate
from sam_learner.sam_models.grounded_action import GroundedAction
def validate_no_duplicates(tested_list: List[str]) -> NoReturn:
"""Validate that the predicate has only one possible match in the literal.
:param tested_list: the list to test for duplicates.
"""
contains_duplicates = len(set(tested_list)) != len(tested_list)
if contains_duplicates:
raise ValueError(f"No duplications allowed! The predicates received - {tested_list}")
class PredicatesMatcher:
"""Class that matches predicates according to the needed properties in the learning process."""
matcher_domain: Domain
logger: logging.Logger
def __init__(self, domain_path: Optional[str] = None, domain: Optional[Domain] = None):
self.logger = logging.getLogger(__name__)
assert not (domain_path and domain)
if domain_path is not None:
self.matcher_domain = Parser(domain_path).parse_domain(read_from_file=True)
if domain is not None:
self.matcher_domain = Domain(
name=domain.name,
types=domain.types,
predicates={name: ComparablePredicate(p.name, p.signature) for name, p in domain.predicates.items()},
actions={},
constants={name: constant for name, constant in domain.constants.items()}
)
def search_for_parameters_in_constants(
self, possible_predicate: ComparablePredicate, grounded_predicate: ComparablePredicate) -> NoReturn:
"""Search for a match in the constants.
:param possible_predicate: the partially matched predicate created by the matcher.
:param grounded_predicate: the grounded predicate that is seen in the trajectory.
"""
predicate_objects = [signature_item[0] for signature_item in grounded_predicate.signature]
domain_predicate = self.matcher_domain.predicates[grounded_predicate.name]
self.logger.debug("Searching for the predicate's parameters in the constants.")
for index, constant_name in enumerate(self.matcher_domain.constants):
if constant_name not in predicate_objects:
continue
literal_object_index = predicate_objects.index(constant_name)
if domain_predicate.signature[literal_object_index] != \
possible_predicate.signature[literal_object_index]:
self.logger.debug("The parameters was already found in the action's signature, Skipping.")
continue
possible_predicate.signature[literal_object_index] = (
constant_name, self.matcher_domain.constants[constant_name])
def match_predicate_to_action_literals(
self, grounded_predicate: ComparablePredicate,
grounded_signature: SignatureType,
lifted_signature: SignatureType) -> Optional[ComparablePredicate]:
"""Match a literal to a possible lifted precondition for the input action.
:param grounded_predicate: the grounded predicate that represent part of the previous state.
:param grounded_signature: the signature of the action that contains the actual objects
that the action was executed on.
:param lifted_signature: the lifted signature of the action, is accessible from the
trajectory.
:return: a precondition, in case the action and the predicate contain matching objects,
None otherwise.
"""
predicate_objects = [signature_item[0] for signature_item in grounded_predicate.signature]
grounded_action_objects = [signature_item[0] for signature_item in grounded_signature]
constants_names = []
if len(self.matcher_domain.constants) > 0:
constants_names = [name for name in self.matcher_domain.constants]
validate_no_duplicates(predicate_objects)
validate_no_duplicates(grounded_action_objects)
if not set(predicate_objects).issubset(set(grounded_action_objects).union(constants_names)):
self.logger.debug("The predicate does not match the action with the constants")
return None
domain_predicate = self.matcher_domain.predicates[grounded_predicate.name]
possible_signature = [(item[0], item[1]) for item in domain_predicate.signature]
possible_predicate_match: ComparablePredicate = ComparablePredicate(
domain_predicate.name, possible_signature)
for index, (action_object_name, object_types) in enumerate(grounded_signature):
if action_object_name not in predicate_objects:
continue
literal_object_index = predicate_objects.index(action_object_name)
parameter_name, parameter_types = lifted_signature[index]
possible_predicate_match.signature[literal_object_index] = (
parameter_name, parameter_types)
self.search_for_parameters_in_constants(possible_predicate_match, grounded_predicate)
return possible_predicate_match
def get_possible_literal_matches(
self, grounded_action: GroundedAction, literals: List[ComparablePredicate]) -> List[ComparablePredicate]:
"""Get a list of possible preconditions for the action according to the previous state.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param literals: the list of literals that we try to match according to the action.
:return: a list of possible preconditions for the action that is being executed.
"""
possible_matches = []
lifted_signature = grounded_action.lifted_signature
grounded_signature = grounded_action.grounded_signature
for predicate in literals:
try:
matches = self.match_predicate_to_action_literals(
predicate, grounded_signature, lifted_signature)
except ValueError as error:
self.logger.debug(f"When parsing {grounded_action.activated_action_representation}, "
f"with the predicate {str(predicate)} "
f"got the error {error}! proceeding!")
matches = None
if matches is None:
continue
possible_matches.append(copy.deepcopy(matches))
return possible_matches
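# Illustrative usage sketch (not part of the original module): the matcher is built once per
# domain and queried with a grounded action and the facts of the state it was executed in.
# `grounded_action` and `previous_state_facts` are placeholders here because the
# GroundedAction constructor is defined elsewhere in the package.
#
#   matcher = PredicatesMatcher(domain_path="domain.pddl")
#   lifted_preconditions = matcher.get_possible_literal_matches(grounded_action, previous_state_facts)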
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/predicates_matcher.py
| 0.849332 | 0.422445 |
predicates_matcher.py
|
pypi
|
from pathlib import Path
from typing import NoReturn, List, Dict
from pddl.pddl import Predicate, Type, Problem
class ProblemExporter:
"""Class that is able to export a domain to a correct PDDL file."""
@staticmethod
def write_objects(problem_objects: Dict[str, Type]) -> str:
"""Writes the definitions of the types according to the PDDL file format.
:param problem_objects: the objects that are available in the learned domain.
:return: the formatted string representing the objects in the PDDL problem file.
"""
objects_str = "(:objects\n{objects_content}\n)\n"
objects = []
for object_name, pddl_type in problem_objects.items():
objects.append(f"\t{object_name} - {pddl_type.name}")
return objects_str.format(objects_content="\n".join(objects))
@staticmethod
def write_initial_state(initial_state: List[Predicate]) -> str:
"""Writes the definitions of the types according to the PDDL file format.
:param initial_state: the objects that are available in the learned domain.
:return: the formatted string representing the state in the PDDL problem file.
"""
state_str = "(:init\n{state_content}\n)\n"
predicates = ProblemExporter.extract_state_predicates(initial_state)
return state_str.format(state_content="\n".join(predicates))
@staticmethod
def write_goal_state(goal_state: List[Predicate]) -> str:
"""Writes the definitions of the types according to the PDDL file format.
:param goal_state: the objects that are available in the learned domain.
:return: the formatted string representing the state in the PDDL problem file.
"""
state_str = "(:goal\n\t(and\n{state_content}\t\n)\n)\n"
predicates = ProblemExporter.extract_state_predicates(goal_state)
return state_str.format(state_content="\n".join(predicates))
@staticmethod
def extract_state_predicates(state: List[Predicate]) -> List[str]:
"""Extract the needed problem predicates for the PDDL file representation.
:param state: the state to write in a PDDL format.
:return: the strings of containing the state's data.
"""
predicates = []
for predicate in state:
predicate_objects = [obj[0] for obj in predicate.signature]
objects_str = " ".join(predicate_objects)
predicates.append(f"\t({predicate.name} {objects_str})")
predicates = list(set(predicates))
return predicates
def export_problem(self, problem: Problem, export_path: Path) -> NoReturn:
"""Export the domain object to a correct PDDL file.
:param problem: the problem object to export to a PDDL file.
:param export_path: the path to the file that the domain would be exported to.
"""
problem_objects = self.write_objects(problem.objects)
initial_state = self.write_initial_state(problem.initial_state)
goal_state = self.write_goal_state(problem.goal)
problem_data = f"(define (problem {problem.name}) (:domain {problem.domain.name})\n" \
f"{problem_objects}\n" \
f"{initial_state}\n" \
f"{goal_state}\n)"
with open(export_path, "wt") as export_problem_file:
export_problem_file.write(problem_data)
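# Illustrative usage sketch (not part of the original module): round-trip a parsed problem
# back to a PDDL file. The file paths are placeholders; Parser is used the same way as in
# the plan generators of this package.
if __name__ == "__main__":
    from pddl.parser import Parser
    parser = Parser("domain.pddl", "problem01.pddl")
    parsed_domain = parser.parse_domain(read_from_file=True)
    parsed_problem = parser.parse_problem(dom=parsed_domain, read_from_file=True)
    ProblemExporter().export_problem(parsed_problem, Path("exported_problem01.pddl"))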
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/problem_export.py
| 0.895617 | 0.633226 |
problem_export.py
|
pypi
|
import logging
import sys
from http import HTTPStatus
from pathlib import Path
from typing import NoReturn
import requests
from requests import Response
SOLVE_URL = 'http://solver.planning.domains/solve'
SOLVE_AND_VALIDATE_URL = 'http://solver.planning.domains/solve-and-validate'
class RemotePlansGenerator:
"""Class that uses an external service to generate plans for the learner algorithm.
Attributes:
logger: the logger of the class.
"""
    logger: logging.Logger
def __init__(self):
self.logger = logging.getLogger(__name__)
def export_plan_from_response(self, problems_directory_path: str, problem_file_path: Path,
response: Response) -> NoReturn:
"""Export the plan if exists into a solution file.
:param problems_directory_path: the directory in which we export the output file to.
        :param problem_file_path: the path to the problem file (used to generate the solution
        file's name).
:param response: the response that was returned from the solving server.
"""
if response.status_code < HTTPStatus.BAD_REQUEST:
response_data: dict = response.json()
if "plan" not in response_data["result"]:
return
if response_data["result"]["val_status"] == "err":
self.logger.debug(response_data["result"]["val_stdout"])
return
with open(Path(problems_directory_path, f"{problem_file_path.stem}_plan.solution"),
"wt") as plan_file:
self.logger.debug("Solution Found!")
plan_file.write(
'\n'.join([action["name"] for action in response_data["result"]["plan"]]))
def generate_plans(self, domain_file_path: str, problems_directory_path: str, validate: bool = False) -> NoReturn:
"""Generates the plans using the solver that exists in the web.
:param domain_file_path: the path to the domain file.
:param problems_directory_path: the path to the directory containing the problems needed to solve.
        :param validate: whether or not to validate the generated plans.
"""
for file_path in Path(problems_directory_path).glob("*.pddl"):
self.logger.info(f"Solving the problem {file_path.stem}")
with open(domain_file_path, "rt") as domain_file, open(file_path, "rt") as problem_file:
data = {"domain": domain_file.read(), "problem": problem_file.read()}
url = SOLVE_AND_VALIDATE_URL if validate else SOLVE_URL
response: Response = requests.post(url, verify=False, json=data)
self.export_plan_from_response(problems_directory_path, file_path, response)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
RemotePlansGenerator().generate_plans(domain_file_path=sys.argv[1], problems_directory_path=sys.argv[2])
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/remote_plan_generator.py
| 0.66236 | 0.23316 |
remote_plan_generator.py
|
pypi
|
import logging
from typing import List, Tuple, NoReturn
import grounding
from pathlib import Path
from pddl.pddl import Problem, Domain, Action, Predicate
from pyperplan import Parser
from task import Operator
from sam_learner.core.grounded_action_locator import parse_plan_action_string, locate_lifted_action, \
ground_lifted_action
from sam_learner.sam_models.parameter_binding import ParameterBinding
from sam_learner.sam_models.state import State
from sam_learner.sam_models.trajectory_component import TrajectoryComponent
from sam_learner.sam_models.types import Trajectory
class TrajectoryGenerator:
"""Class that generates trajectory out of a problem file and it's corresponding plan."""
logger: logging.Logger
domain: Domain
problem: Problem
def __init__(self, domain_path: str, problem_path: str):
parser = Parser(domain_path, problem_path)
self.logger = logging.getLogger(__name__)
self.domain = parser.parse_domain(read_from_file=True)
self.problem = parser.parse_problem(dom=self.domain, read_from_file=True)
def _parse_plan_actions(self, plan_path: str) -> Tuple[List[str], List[Action]]:
"""Parse the actions that exist in the plan file and extract the action objects with the
object binding.
:param plan_path: the path to the file containing the plan that solves the input problem.
:return: both the grounded operator names and the actions to execute.
"""
self.logger.debug(f"Parsing the plan file in the path - {plan_path}")
with open(plan_path, "rt") as plan_file:
lines = plan_file.readlines()
lines = [line.strip("\n") for line in lines]
if "cost" in lines[-1]:
lines.pop() # remove the cost line from the plan.
actions = []
self.logger.debug("The plan contains the following grounded actions:")
for index, line in enumerate(lines):
self.logger.debug(line)
action_name, action_params = parse_plan_action_string(line)
if len(action_params) == 0:
lines[index] = f"({action_name})"
actions.append(locate_lifted_action(self.domain, action_name))
return lines, actions
def _create_state_parameter_matching(self, grounded_state: frozenset) -> State:
"""Matches between the grounded and the lifted predicates.
:param grounded_state: the grounded set of facts that represent the state.
:return: a match between the grounded and the lifted predicates.
"""
lifted_state_data: List[Tuple[Predicate, List[ParameterBinding]]] = \
State.generate_problem_state(grounded_state, self.domain, self.problem)
grounded_predicates = []
for predicate, bindings in lifted_state_data:
signature = [binding.bind_parameter() for binding in bindings]
grounded_predicates.append(Predicate(predicate.name, signature))
return State(grounded_predicates, self.domain)
def _create_single_trajectory_component(
self, index: int, action: Action, op_name: str,
operators: dict, previous_state: State) -> TrajectoryComponent:
"""Create a single trajectory component by applying the action on the previous state.
:param index: the index of the step that is being parsed currently.
:param action: the lifted action that is to be executed on the state.
:param op_name: the grounded operator's name.
:param operators: the grounded operators that can apply an action on a state.
:param previous_state: the previous state to be changed.
:return: the trajectory component representing the current stage.
"""
grounded_action: Operator = operators[op_name]
grounded_next_state_statements = grounded_action.apply(previous_state.ground_facts())
next_state = self._create_state_parameter_matching(grounded_next_state_statements)
_, action_objects = parse_plan_action_string(grounded_action.name)
_, bindings = ground_lifted_action(action, action_objects)
component = TrajectoryComponent(index=index,
previous_state=previous_state,
action=action,
action_parameters_binding=bindings,
next_state=next_state)
self.logger.debug(f"Finished creating the component:\n{component}")
return component
def generate_trajectory(self, plan_path: str, should_return_partial_trajectory: bool = False) -> Trajectory:
"""Generates a trajectory out of a problem file and a plan file.
        :param plan_path: the path to the plan generated by a solver.
:param should_return_partial_trajectory: whether or not to return a partial trajectory in case of failure.
:return: the trajectory that represents the plan file.
Note:
the plan output should be in the form of: (load-truck obj23 tru2 pos2) ...
"""
self.logger.info(f"Generating a trajectory from the file: {self.problem.name}")
trajectory = []
grounded_planning_task = grounding.ground(problem=self.problem)
operators = {operator.name: operator for operator in grounded_planning_task.operators}
op_names, plan_actions_sequence = self._parse_plan_actions(plan_path)
previous_state = State(self.problem.initial_state, self.domain)
self.logger.info("Starting to iterate over the actions sequence.")
for index, (op_name, action) in enumerate(zip(op_names, plan_actions_sequence)):
try:
component = self._create_single_trajectory_component(
index, action, op_name, operators, previous_state)
trajectory.append(component)
previous_state = component.next_state
except AssertionError:
error_message = f"The operation {op_name} is not applicable! The failed action - {action.name}"
self.logger.warning(error_message)
if should_return_partial_trajectory:
self.logger.debug(f"Returning partial trajectory since the flag was turned on.")
return trajectory
raise AssertionError(error_message)
return trajectory
def _format_trajectory_fama_problem_objects(self) -> str:
"""Formats the trajectory's problem objects in the format that the algorithm can read."""
problem_objects = self.problem.objects
return " ".join([f"{object_name} - {object_type.name}" for object_name, object_type in problem_objects.items()])
def create_trajectory_in_fama_format(self, plan_path: Path) -> Tuple[str, Trajectory]:
"""Create the trajectory output in a format that FAMA algorithm can use to read.
:param plan_path: the path to the plan file of the current problem.
:return: the string representation of the trajectory in the format that FAMA can easily read.
"""
trajectory = self.generate_trajectory(str(plan_path))
trajectory_str = ""
trajectory_str += f"(trajectory\n\n(:objects {self._format_trajectory_fama_problem_objects()})\n\n"
initial_state = trajectory[0].previous_state
trajectory_str += f"(:init {' '.join([fact for fact in initial_state.ground_facts()])})\n\n"
for trajectory_component in trajectory:
trajectory_str += f"(:action {trajectory_component.grounded_action.activated_action_representation})\n\n"
trajectory_str += f"(:state " \
f"{' '.join([fact for fact in trajectory_component.next_state.ground_facts()])})\n\n"
trajectory_str += ")"
return trajectory_str, trajectory
def validate_trajectory(self, trajectory: Trajectory) -> bool:
"""Validate that the last state in the trajectory is indeed the goal state.
:param trajectory: the trajectory to validate.
:return: whether or not the trajectory ends with the goal state.
"""
last_component: TrajectoryComponent = trajectory[-1]
last_state = last_component.next_state
grounded_goals = grounding.ground(problem=self.problem).goals
return grounded_goals <= last_state.ground_facts()
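# Illustrative usage sketch (not part of the original module): build a trajectory from a
# problem and its solution plan and check that it reaches the goal. The file paths are
# placeholders.
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    generator = TrajectoryGenerator("domain.pddl", "problem01.pddl")
    generated_trajectory = generator.generate_trajectory("problem01_plan.solution")
    print(f"Trajectory length: {len(generated_trajectory)}, "
          f"reaches goal: {generator.validate_trajectory(generated_trajectory)}")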
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/trajectory_generator.py
| 0.860002 | 0.464416 |
trajectory_generator.py
|
pypi
|
from itertools import combinations, product
from typing import List, Dict, Set, Any, Tuple
from pddl.pddl import Action, Effect
from sam_learner.sam_models import ComparablePredicate, SignatureType
def n_choose_k(array: List[Any], k: int, effect_type: str) -> List[Tuple[Any, ...]]:
"""Implementation of the combinatorial choosing method.
:param array: the array containing the items that we are choosing from the subset.
:param k: the number of elements to choose from the list.
:param effect_type: the type of the effect, i.e. add or delete effect.
:return: the combination of items based on the input number of elements.
"""
fluent_combinations = list(combinations(array, k))
return [(*combination, effect_type) for combination in fluent_combinations]
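# Example (for exposition): choosing one fluent binding at a time and tagging it with the
# role it will play in the proxy action:
#   n_choose_k(["(on ?x ?y)", "(on ?y ?x)"], 1, "add-effect")
#   -> [("(on ?x ?y)", "add-effect"), ("(on ?y ?x)", "add-effect")]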
class LightProxyActionGenerator:
"""Class that is able to know whether an action contains duplicates and can create a proxy action to represent
the inconsistent action usage."""
def get_unit_clause_effects(
self, effect_cnf_clauses: Dict[str, Set[ComparablePredicate]]) -> Set[ComparablePredicate]:
"""
:param effect_cnf_clauses:
:return:
"""
unit_clause_effects = set()
for cnf_effects in effect_cnf_clauses.values():
if len(cnf_effects) == 1:
unit_clause_effects.update(cnf_effects)
return unit_clause_effects
def get_precondition_fluents(
self, precondition_cnfs: Dict[str, Set[ComparablePredicate]]) -> Set[ComparablePredicate]:
"""
:param precondition_cnfs:
:return:
"""
precondition_predicates = set()
for cnf in precondition_cnfs.values():
precondition_predicates.update(cnf)
return precondition_predicates
def create_proxy_actions(self, action_name: str, action_signature: SignatureType,
surely_preconditions: Set[ComparablePredicate],
add_effect_cnfs: Dict[str, Set[ComparablePredicate]],
delete_effect_cnfs: Dict[str, Set[ComparablePredicate]]) -> List[Action]:
"""Creates the proxy action permutations based on the algorithm to create the power set of safe actions.
:param action_name: the name of the original action that exists in the original domain.
:param action_signature: the original signature of the action as known in the domain.
:param surely_preconditions: the stored preconditions for the designated action.
:param add_effect_cnfs: the add effects CNFs learned through the learning process.
:param delete_effect_cnfs: the delete effects CNFs learned through the learning process.
        :return: the proxy actions that were created from the ambiguous effect combinations.
"""
surely_add_effects = self.get_unit_clause_effects(add_effect_cnfs)
surely_delete_effects = self.get_unit_clause_effects(delete_effect_cnfs)
combined_combinations = self.create_effects_combinations(add_effect_cnfs, delete_effect_cnfs)
effects_preconditions_product = self.create_effects_product(combined_combinations)
proxy_actions = []
for index, product_item in enumerate(effects_preconditions_product):
effect_items = product_item[:-1]
preconditions = product_item[-1]
add_effects = []
delete_effects = []
for effect, effect_type in effect_items:
if effect_type == "add-effect":
add_effects.append(effect)
elif effect_type == "delete-effect":
delete_effects.append(effect)
all_preconditions = list(surely_preconditions.union(preconditions))
effect = Effect()
effect.addlist = surely_add_effects.union(add_effects)
effect.dellist = surely_delete_effects.union(delete_effects)
new_action = Action(name=f"proxy-{action_name}-{index}",
signature=action_signature,
precondition=all_preconditions,
effect=effect)
proxy_actions.append(new_action)
return proxy_actions
def create_effects_product(self, combined_combinations: List[List[Tuple[Any, ...]]]) -> List[Tuple[Any, ...]]:
"""
:param combined_combinations:
:return:
"""
preconditions_effects_product = []
selected_effects_product = list(product(*combined_combinations))
for product_item in selected_effects_product:
filtered_in_precondition = []
for index, selected_effect_fluent in enumerate(product_item):
relevant_fluents_variants = combined_combinations[index]
fluent_preconditions_items = filter(lambda f: f != selected_effect_fluent, relevant_fluents_variants)
fluent_preconditions = [item[0] for item in fluent_preconditions_items]
filtered_in_precondition.extend(fluent_preconditions)
preconditions_effects_product.append((*product_item, filtered_in_precondition))
return preconditions_effects_product
def create_effects_combinations(self, add_effect_cnfs, delete_effect_cnfs) -> List[List[Tuple[Any, ...]]]:
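        """Collect, for every ambiguous fluent, its possible single-binding choices tagged with their effect type.
        :param add_effect_cnfs: the add-effect CNF clauses learned for the action.
        :param delete_effect_cnfs: the delete-effect CNF clauses learned for the action.
        :return: for each ambiguous fluent, the list of its possible (binding, effect-type) choices.
        """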
non_unit_add_effect_fluents = [fluent_name for fluent_name in add_effect_cnfs if
len(add_effect_cnfs[fluent_name]) > 1]
non_unit_del_effect_fluents = [fluent_name for fluent_name in delete_effect_cnfs if
len(delete_effect_cnfs[fluent_name]) > 1]
combined_combinations = []
for fluent_name in non_unit_add_effect_fluents:
            # For now we only support the case where we don't remove parameters from the signature.
combined_combinations.append(n_choose_k(list(add_effect_cnfs[fluent_name]), 1, "add-effect"))
for fluent_name in non_unit_del_effect_fluents:
combined_combinations.append(n_choose_k(list(delete_effect_cnfs[fluent_name]), 1, "delete-effect"))
return combined_combinations
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/proxy_action_generator.py
| 0.88775 | 0.557002 |
proxy_action_generator.py
|
pypi
|
from itertools import permutations
from typing import List, Tuple, Optional
from pddl.pddl import Type, Domain
from sam_learner.sam_models import ComparablePredicate, SignatureType, GroundedAction
from .predicates_matcher import PredicatesMatcher
def create_signature_permutations(
grounded_signature: SignatureType, lifted_signature: SignatureType,
subset_size: int) -> List[Tuple[Tuple[str, Tuple[Type]], Tuple[str, Tuple[Type]]]]:
"""Choose r items our of a list size n.
:param grounded_signature: the action's grounded signature.
:param lifted_signature: the action's lifted signature.
:param subset_size: the size of the subset.
:return: a list containing subsets of the original list.
"""
matching_signatures = zip(grounded_signature, lifted_signature)
matching_permutations = list(permutations(matching_signatures, subset_size))
return matching_permutations
class ExtendedMatcher(PredicatesMatcher):
"""An extended version of the predicate matcher class."""
def __init__(self, domain_path: Optional[str] = None, domain: Optional[Domain] = None):
super().__init__(domain_path, domain)
def _is_matching_possible(self, grounded_signature: SignatureType, predicate_objects: List[str]) -> bool:
"""Test whether it is possible to match the predicate to the current action.
:param grounded_signature: the action's grounded signature.
:param predicate_objects: the names of the objects that appear in the predicate.
:return: whether or not it is possible to match the predicate to the action based on the action's signature.
"""
grounded_action_objects = [signature_item[0] for signature_item in grounded_signature]
constants_names = [name for name in self.matcher_domain.constants]
possible_grounded_matches = grounded_action_objects + constants_names
if not all(predicate_obj in possible_grounded_matches for predicate_obj in predicate_objects):
self.logger.debug("The predicate objects are not contained in the action's object, matching aborted.")
return False
return True
def extended_predicate_matching(
self, grounded_predicate: ComparablePredicate, grounded_signature: SignatureType,
lifted_signature: SignatureType) -> Optional[List[ComparablePredicate]]:
"""The extended functionality that matches predicates to actions with duplicates.
:param grounded_predicate: the grounded predicate that appeared in the trajectory.
:param grounded_signature: the action's grounded signature.
:param lifted_signature: the action's lifted signature.
:return: the possible matching predicates.
"""
predicate_objects = [signature_item[0] for signature_item in grounded_predicate.signature]
if not self._is_matching_possible(grounded_signature, predicate_objects):
return None
constant_signature_items = [(name, (const_type,)) for name, const_type in self.matcher_domain.constants.items()]
grounded_objects = grounded_signature + constant_signature_items
lifted_objects = lifted_signature + constant_signature_items
matching_signature_permutations = create_signature_permutations(
grounded_objects, lifted_objects, len(predicate_objects))
possible_matches = []
for signature_option in matching_signature_permutations:
lifted_match = []
matching_grounded_action_objects = []
for grounded_signature_item, lifted_signature_item in signature_option:
lifted_match.append(lifted_signature_item)
matching_grounded_action_objects.append(grounded_signature_item[0])
if predicate_objects == matching_grounded_action_objects:
possible_matches.append(ComparablePredicate(grounded_predicate.name, lifted_match))
return possible_matches
def match_predicate_to_action_literals(
self, grounded_predicate: ComparablePredicate,
grounded_signature: SignatureType,
lifted_signature: SignatureType) -> Optional[List[ComparablePredicate]]:
"""Match a literal to a possible lifted precondition for the input action.
Note:
This method does not raise an error in case that there are duplications in either the state or the action.
This method first tries to use the parent matching method. In case of an error being raised, the method
will then rollback to the extended matching procedure.
:param grounded_predicate: the grounded predicate that represent part of the previous state.
:param grounded_signature: the signature of the action that contains the actual objects
that the action was executed on.
:param lifted_signature: the lifted signature of the action, is accessible from the
trajectory.
:return: a precondition, in case the action and the predicate contain matching objects,
None otherwise.
"""
try:
match = super(ExtendedMatcher, self).match_predicate_to_action_literals(
grounded_predicate, grounded_signature, lifted_signature)
return None if match is None else [match]
except ValueError:
self.logger.debug("Found duplications in either the state of the action. Due to this fact, "
"rolling back to the extend matching procedure.")
return self.extended_predicate_matching(grounded_predicate, grounded_signature, lifted_signature)
def get_possible_literal_matches(
self, grounded_action: GroundedAction, literals: List[ComparablePredicate]) -> List[ComparablePredicate]:
"""Get a list of possible preconditions for the action according to the previous state.
:param grounded_action: the grounded action that was executed according to the trajectory.
:param literals: the list of literals that we try to match according to the action.
:return: a list of possible preconditions for the action that is being executed.
"""
possible_matches = []
lifted_signature = grounded_action.lifted_signature
grounded_signature = grounded_action.grounded_signature
for predicate in literals:
matches = self.match_predicate_to_action_literals(predicate, grounded_signature, lifted_signature)
if matches is None:
continue
possible_matches.extend(matches)
return possible_matches
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/extended_predicate_matcher.py
| 0.934791 | 0.574753 |
extended_predicate_matcher.py
|
pypi
|
from typing import Tuple, List, Set
from pddl.pddl import Predicate
from sam_learner.sam_models.comparable_predicate import ComparablePredicate
from sam_learner.sam_models.state import State
def extract_states_facts(
previous_state: State, next_state: State) -> Tuple[Set[ComparablePredicate], Set[ComparablePredicate]]:
"""extract the set of effects from the states.
:param previous_state: the state that had been before the action was executed.
:param next_state: the state after the action was executed.
:return: the previous and the next states facts.
"""
prev_state_predicates = \
set([ComparablePredicate(predicate=predicate) for predicate in previous_state.facts])
next_state_predicates = \
set([ComparablePredicate(predicate=predicate) for predicate in next_state.facts])
return prev_state_predicates, next_state_predicates
def extract_effects(
previous_state: State, next_state: State) -> Tuple[List[ComparablePredicate], List[ComparablePredicate]]:
"""Extract the effects of the action according to the two lemmas that we know.
:param previous_state: the state that had been before the action was executed.
:param next_state: the state after the action was executed.
:return: the add effects and the del effects.
"""
prev_state_predicates, next_state_predicates = extract_states_facts(previous_state, next_state)
add_effects = next_state_predicates.difference(prev_state_predicates)
del_effects = prev_state_predicates.difference(next_state_predicates)
return list(add_effects), list(del_effects)
def extract_maybe_fluents(previous_state: State, next_state: State) -> List[ComparablePredicate]:
"""Extract the `maybe` effects that will only be used for statistic reasons.
:param previous_state: the state that had been before the action was executed.
:param next_state: the state after the action was executed.
:return: the list of predicates that could be used as the `maybe` effects.
"""
prev_state_predicates, next_state_predicates = extract_states_facts(previous_state, next_state)
return list(prev_state_predicates.intersection(next_state_predicates))
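# Illustrative usage sketch (not part of the original module): extract_effects only reads
# the `facts` attribute of the two states, so a simple namespace object is used here as a
# stand-in for State. Type(name, parent) is assumed to be pyperplan's constructor.
if __name__ == "__main__":
    from types import SimpleNamespace
    from pddl.pddl import Type
    block = Type("block", None)
    on_a_b = ComparablePredicate("on", [("a", (block,)), ("b", (block,))])
    clear_b = ComparablePredicate("clear", [("b", (block,))])
    ontable_a = ComparablePredicate("ontable", [("a", (block,))])
    previous_state = SimpleNamespace(facts=[on_a_b, clear_b])
    next_state = SimpleNamespace(facts=[clear_b, ontable_a])
    add_effects, del_effects = extract_effects(previous_state, next_state)
    print(f"add effects: {add_effects}")     # [ontable(a)]
    print(f"delete effects: {del_effects}")  # [on(a, b)]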
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/core/effects_extractor.py
| 0.893832 | 0.585575 |
effects_extractor.py
|
pypi
|
from typing import Tuple, List, Optional
from pddl.pddl import Predicate, Type
class ComparablePredicate(Predicate):
"""Class that extends the basic predicate to enable comparison."""
name: str
signature: List[Tuple[str, Tuple[Type]]]
def __init__(self, name: Optional[str] = None,
signature: Optional[List[Tuple[str, Tuple[Type]]]] = None,
predicate: Optional[Predicate] = None):
if predicate:
super(ComparablePredicate, self).__init__(predicate.name, predicate.signature)
else:
super(ComparablePredicate, self).__init__(name, signature)
@staticmethod
def is_sub_type(this_type: Type, other_type: Type) -> bool:
"""Checks if a type a subtype of the other.
:param this_type: the checked type.
        :param other_type: the type that the first is checked to be a subtype of.
:return: whether or not the first is a subtype of the other.
"""
ancestors_type_names = [this_type.name]
compared_type = this_type
while compared_type.parent is not None:
ancestors_type_names.append(compared_type.parent.name)
compared_type = compared_type.parent
return other_type.name in ancestors_type_names
@staticmethod
def extract_types(signature: List[Tuple[str, Tuple[Type]]]) -> List[Type]:
"""Extract the type of the object from the signature format.
:param signature: the signature of the predicate.
:return: the types that were extracted from the signature.
"""
types = []
for _, param_type in signature:
if type(param_type) is tuple or type(param_type) is list:
types.append(param_type[0])
else:
types.append(param_type)
return types
def __eq__(self, other: Predicate):
self_signature_params_name = [name for name, _ in self.signature]
other_signature_params_name = [name for name, _ in other.signature]
return \
(self.name == other.name and self_signature_params_name ==
other_signature_params_name and
all([self.is_sub_type(this_type, other_type) for this_type, other_type in
zip(self.extract_types(self.signature), self.extract_types(other.signature))]))
def __str__(self):
return f"{self.name}{str(self.signature)}".strip("\n")
def __hash__(self):
return hash(str(self))
def __copy__(self):
return ComparablePredicate(self.name, self.signature)
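# Illustrative usage sketch (not part of the original module): two predicates with the same
# name and parameter names compare equal when each parameter type of the first is a subtype
# of the corresponding type of the second. Type(name, parent) is assumed to be pyperplan's
# constructor.
if __name__ == "__main__":
    physical = Type("physical", None)
    block = Type("block", physical)
    lifted = ComparablePredicate("at", [("?x", (physical,))])
    specialized = ComparablePredicate("at", [("?x", (block,))])
    print(specialized == lifted)   # True  - block is a subtype of physical
    print(lifted == specialized)   # False - physical is not a subtype of block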
|
/sam_learner-2.1.9-py3-none-any.whl/sam_learner/sam_models/comparable_predicate.py
| 0.948692 | 0.408336 |
comparable_predicate.py
|
pypi
|