ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M)
---|---|---|
py | 1a4922573f55f9753b4b772ecf26b0a782505243 | #!/usr/bin/env python
from cattle import from_env
import time
def _time_str(start, stop, ms):
running = ''
if stop is None:
running = '*'
stop = int(time.time() * 1000)
duration = stop - start
if ms:
unit = 'ms'
else:
duration = duration/1000
unit = 'seconds'
return '{}{} {}'.format(running, duration, unit)
def print_time(pi):
if 'stopTime' in pi:
return _time_str(pi.startTime, pi.stopTime, True)
if 'endTimeTS' not in pi or pi.endTimeTS is None:
return _time_str(pi.startTimeTS, None, False)
return _time_str(pi.startTimeTS, pi.endTimeTS, False)
def is_running(pi):
if 'stopTime' in pi:
return pi.stopTime is None
return 'endTimeTS' not in pi or pi.endTimeTS is None
def print_pi(pi, detail=False):
print print_time(pi), \
pi.id, \
pi.processName, \
'{}:{}'.format(pi.resourceType, pi.resourceId), \
pi.phase, \
pi.exitReason, \
pi.result
if detail or is_running(pi):
for pe in pi.processExecutions():
for x in pe.log.executions:
print_pe(x, prefix=' ')
def print_pe(pe, prefix=''):
print prefix, print_time(pe), 'PROCESS:', pe.name, \
'{}:{}'.format(pe.resourceType, pe.resourceId), pe.exitReason
for phe in pe.processHandlerExecutions:
print_phe(phe, prefix=prefix + ' ')
def print_phe(phe, prefix=''):
print prefix, print_time(phe), 'HANDLER:', phe.name
for child in phe.children:
for pe in child.executions:
print_pe(pe, prefix=prefix + ' ')
if __name__ == '__main__':
import sys
client = from_env()
if len(sys.argv) == 1:
for pi in client.list_process_instance(sort='startTime', order='desc',
limit=30):
print_pi(pi)
else:
pi = client.by_id_process_instance(sys.argv[1])
print_pi(pi, detail=True)
|
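A minimal usage sketch for the listing script above, assuming the CATTLE_URL/CATTLE_ACCESS_KEY/CATTLE_SECRET_KEY environment variables are exported so `from_env()` can authenticate; it reuses only calls and fields that already appear in the script.
from cattle import from_env

client = from_env()
# tail the five most recent process instances, mirroring the script's default mode
for pi in client.list_process_instance(sort='startTime', order='desc', limit=5):
    print('{} {} {} {}'.format(pi.id, pi.processName, pi.phase, pi.result))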
py | 1a4923fe4d6a8bbc1188c688fd6684da4466bb40 | #!/usr/bin/env python
"""
Python 3.9 function for starting the neural-network training process.
File name: train_c4.py
Version: 0.1
Author: Andrej Marinchenko
Date: 2021-12-20
"""
from alpha_net_c4 import ConnectNet, AlphaLoss, board_data
import os
import pickle
import datetime
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from torch.nn.utils import clip_grad_norm_
import matplotlib.pyplot as plt
import logging
logging.basicConfig(format='%(asctime)s [%(levelname)s]: %(message)s', \
datefmt='%m/%d/%Y %I:%M:%S %p', level=logging.INFO)
logger = logging.getLogger(__file__)
def save_as_pickle(filename, data):
completeName = os.path.join("./model_data/",\
filename)
with open(completeName, 'wb') as output:
pickle.dump(data, output)
def load_pickle(filename):
completeName = os.path.join("./model_data/",\
filename)
with open(completeName, 'rb') as pkl_file:
data = pickle.load(pkl_file)
return data
def load_state(net, optimizer, scheduler, args, iteration, new_optim_state=True):
""" Loads saved model and optimizer states if exists """
base_path = "./model_data/"
checkpoint_path = os.path.join(base_path, "%s_iter%d.pth.tar" % (args.neural_net_name, iteration))
start_epoch, checkpoint = 0, None
if os.path.isfile(checkpoint_path):
checkpoint = torch.load(checkpoint_path)
if checkpoint is not None:
if len(checkpoint) == 1 or new_optim_state:
net.load_state_dict(checkpoint['state_dict'])
logger.info("Loaded checkpoint model %s." % checkpoint_path)
else:
start_epoch = checkpoint['epoch']
net.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
logger.info("Loaded checkpoint model %s, and optimizer, scheduler." % checkpoint_path)
return start_epoch
def load_results(iteration):
""" Loads saved results if exists """
losses_path = "./model_data/losses_per_epoch_iter%d.pkl" % iteration
if os.path.isfile(losses_path):
losses_per_epoch = load_pickle("losses_per_epoch_iter%d.pkl" % iteration)
logger.info("Loaded results buffer")
else:
losses_per_epoch = []
return losses_per_epoch
def train(net, dataset, optimizer, scheduler, start_epoch, cpu, args, iteration):
torch.manual_seed(cpu)
cuda = torch.cuda.is_available()
net.train()
criterion = AlphaLoss()
train_set = board_data(dataset)
train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True, num_workers=0, pin_memory=False)
losses_per_epoch = load_results(iteration + 1)
logger.info("Starting training process...")
update_size = len(train_loader)//10
print("Update step size: %d" % update_size)
for epoch in range(start_epoch, args.num_epochs):
total_loss = 0.0
losses_per_batch = []
for i,data in enumerate(train_loader,0):
state, policy, value = data
state, policy, value = state.float(), policy.float(), value.float()
if cuda:
state, policy, value = state.cuda(), policy.cuda(), value.cuda()
policy_pred, value_pred = net(state) # policy_pred: [batch, n_actions]; value_pred: [batch, 1]
loss = criterion(value_pred[:,0], value, policy_pred, policy)
loss = loss/args.gradient_acc_steps
loss.backward()
clip_grad_norm_(net.parameters(), args.max_norm)
if (epoch % args.gradient_acc_steps) == 0:
optimizer.step()
optimizer.zero_grad()
total_loss += loss.item()
if i % update_size == (update_size - 1): # log every update_size mini-batches (each of size batch_size)
losses_per_batch.append(args.gradient_acc_steps*total_loss/update_size)
print('[Iteration %d] Process ID: %d [Epoch: %d, %5d/ %d points] total loss per batch: %.3f' %
(iteration, os.getpid(), epoch + 1, (i + 1)*args.batch_size, len(train_set), losses_per_batch[-1]))
print("Policy (actual, predicted):",policy[0].argmax().item(),policy_pred[0].argmax().item())
print("Policy data:", policy[0]); print("Policy pred:", policy_pred[0])
print("Value (actual, predicted):", value[0].item(), value_pred[0,0].item())
#print("Conv grad: %.7f" % net.conv.conv1.weight.grad.mean().item())
#print("Res18 grad %.7f:" % net.res_18.conv1.weight.grad.mean().item())
print(" ")
total_loss = 0.0
scheduler.step()
if len(losses_per_batch) >= 1:
losses_per_epoch.append(sum(losses_per_batch)/len(losses_per_batch))
if (epoch % 2) == 0:
save_as_pickle("losses_per_epoch_iter%d.pkl" % (iteration + 1), losses_per_epoch)
torch.save({
'epoch': epoch + 1,\
'state_dict': net.state_dict(),\
'optimizer' : optimizer.state_dict(),\
'scheduler' : scheduler.state_dict(),\
}, os.path.join("./model_data/",\
"%s_iter%d.pth.tar" % (args.neural_net_name, (iteration + 1))))
'''
# Early stopping
if len(losses_per_epoch) > 50:
if abs(sum(losses_per_epoch[-4:-1])/3-sum(losses_per_epoch[-16:-13])/3) <= 0.00017:
break
'''
logger.info("Finished Training!")
fig = plt.figure()
ax = fig.add_subplot(222)
ax.scatter([e for e in range(start_epoch, (len(losses_per_epoch) + start_epoch))], losses_per_epoch)
ax.set_xlabel("Epoch")
ax.set_ylabel("Loss per batch")
ax.set_title("Loss vs Epoch")
plt.savefig(os.path.join("./model_data/", "Loss_vs_Epoch_iter%d_%s.png" % ((iteration + 1), datetime.datetime.today().strftime("%Y-%m-%d"))))
plt.show()
def train_connectnet(args, iteration, new_optim_state):
# gather data
logger.info("Loading training data...")
data_path="./datasets/iter_%d/" % iteration
datasets = []
for idx,file in enumerate(os.listdir(data_path)):
filename = os.path.join(data_path,file)
with open(filename, 'rb') as fo:
datasets.extend(pickle.load(fo, encoding='bytes'))
datasets = np.array(datasets)
logger.info("Loaded data from %s." % data_path)
# train net
net = ConnectNet()
cuda = torch.cuda.is_available()
if cuda:
net.cuda()
optimizer = optim.Adam(net.parameters(), lr=args.lr, betas=(0.8, 0.999))
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[50,100,150,200,250,300,400], gamma=0.77)
start_epoch = load_state(net, optimizer, scheduler, args, iteration, new_optim_state)
train(net, datasets, optimizer, scheduler, start_epoch, 0, args, iteration)
|
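A hedged sketch of driving `train_connectnet` directly: the attribute names on `args` mirror exactly what the code above reads (`neural_net_name`, `batch_size`, `num_epochs`, `lr`, `gradient_acc_steps`, `max_norm`), while the concrete values, the iteration number, and the presence of `./datasets/iter_0/` and `./model_data/` are assumptions.
from argparse import Namespace

args = Namespace(
    neural_net_name="cc4_current_net",  # illustrative name; checkpoints land under ./model_data/
    batch_size=32,
    num_epochs=300,
    lr=1e-3,
    gradient_acc_steps=1,
    max_norm=1.0,
)
# expects pickled self-play data in ./datasets/iter_0/
train_connectnet(args, iteration=0, new_optim_state=True)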
py | 1a4924021b0c6d10c24a9dba92198ff87637d5cd | import unittest
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from pyrolite.util.plot.density import (
percentile_contour_values_from_meshz,
plot_Z_percentiles,
)
from pyrolite.util.plot.legend import proxy_line
from matplotlib.lines import _get_dash_pattern, _scale_dashes
import matplotlib.colors
class TestPercentileContourValuesFromMeshZ(unittest.TestCase):
def setUp(self):
x, y = np.mgrid[-1:1:100j, -1:1:100j]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
self.z = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
def test_default(self):
percentile_contour_values_from_meshz(self.z)
def test_percentiles(self):
for ps in [[1.0], [0.001], np.linspace(0.001, 1, 10), [0.95, 0.10]]:
with self.subTest(ps=ps):
pc, cs = percentile_contour_values_from_meshz(self.z, percentiles=ps)
def test_resolution(self):
for res in [10, 100, 1000, 10000]:
with self.subTest(res=res):
pc, cs = percentile_contour_values_from_meshz(self.z, resolution=res)
def test_ask_below_minimum(self):
for ps in [[0.0001], [0.000001]]:
with self.subTest(ps=ps):
pc, cs = percentile_contour_values_from_meshz(
self.z, percentiles=ps, resolution=5
)
self.assertIn("min", pc)
class TestPlotZPercentiles(unittest.TestCase):
def setUp(self):
x, y = np.mgrid[-1:1:100j, -1:1:100j]
pos = np.empty(x.shape + (2,))
pos[:, :, 0] = x
pos[:, :, 1] = y
self.xi, self.yi = x, y
self.zi = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)
def test_default(self):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi)
def test_specified_contour_labels(self):
contour_labels = ["95th", "66th", "33rd"]
cs = plot_Z_percentiles(
self.xi, self.yi, zi=self.zi, contour_labels=contour_labels
)
for contour_label, label in zip(contour_labels, cs.labelTextsList):
label = label.get_text()
self.assertTrue(contour_label == label)
def test_styling_specified(self):
fig, ax = plt.subplots(1)
colors = [matplotlib.colors.to_rgba(c) for c in ["g", "b", "k"]]
linestyles = [_get_dash_pattern(d) for d in ["-", "--", "-."]]
linewidths = [1, 2, 3]
cs = plot_Z_percentiles(
self.xi,
self.yi,
zi=self.zi,
ax=ax,
percentiles=[0.95, 0.66, 0.33],
colors=colors,
linestyles=linestyles,
linewidths=linewidths,
)
for contour, color, ls, lw in zip(
cs.collections, colors, linestyles, linewidths
):
self.assertTrue((contour.get_color() == color).all())
self.assertEqual(contour.get_linestyle(), [_scale_dashes(*ls, lw)])
self.assertEqual(contour.get_linewidth(), lw)
def test_linestyles_specified(self):
plot_Z_percentiles(
self.xi,
self.yi,
zi=self.zi,
percentiles=[0.95, 0.66, 0.33],
)
def test_percentiles(self):
for ps in [[1.0], [0.01], np.linspace(0.001, 1, 10), [0.95, 0.10]]:
with self.subTest(ps=ps):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, percentiles=ps)
def test_external_ax(self):
fig, ax = plt.subplots(1)
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, ax=ax)
def test_extent(self):
for extent in [[-1, 1, -1, 1], [-0.01, 0.99, -1.01, -0.01], [-2, 2, -2, -2]]:
with self.subTest(extent=extent):
plot_Z_percentiles(self.xi, self.yi, zi=self.zi, extent=extent)
def tearDown(self):
plt.close("all")
if __name__ == "__main__":
unittest.main()
|
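The unittest cases above double as API documentation; a direct, hedged usage sketch of `plot_Z_percentiles` on the same kind of gridded density surface built in `setUp` might look like this.
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import multivariate_normal
from pyrolite.util.plot.density import plot_Z_percentiles

# build the same bivariate-normal density grid the tests use
xi, yi = np.mgrid[-1:1:100j, -1:1:100j]
pos = np.dstack([xi, yi])
zi = multivariate_normal([0.5, -0.2], [[2.0, 0.3], [0.3, 0.5]]).pdf(pos)

fig, ax = plt.subplots(1)
plot_Z_percentiles(xi, yi, zi=zi, ax=ax, percentiles=[0.95, 0.66, 0.33])
plt.show()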
py | 1a492410f05ef26f73d1515deaf97009755fed9a | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._kusto_pools_operations import build_add_language_extensions_request_initial, build_check_name_availability_request, build_create_or_update_request_initial, build_delete_request_initial, build_detach_follower_databases_request_initial, build_get_request, build_list_by_workspace_request, build_list_follower_databases_request, build_list_language_extensions_request, build_list_skus_by_resource_request, build_list_skus_request, build_remove_language_extensions_request_initial, build_start_request_initial, build_stop_request_initial, build_update_request_initial
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class KustoPoolsOperations:
"""KustoPoolsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.synapse.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def list_skus(
self,
**kwargs: Any
) -> AsyncIterable["_models.SkuDescriptionList"]:
"""Lists eligible SKUs for Kusto Pool resource.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SkuDescriptionList or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.SkuDescriptionList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SkuDescriptionList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_skus_request(
subscription_id=self._config.subscription_id,
template_url=self.list_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_skus_request(
subscription_id=self._config.subscription_id,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SkuDescriptionList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_skus.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Synapse/skus'} # type: ignore
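# Hedged usage sketch (illustrative, not part of the generated module): assumes the
# async management client azure.mgmt.synapse.aio.SynapseManagementClient exposes this
# operation group as `kusto_pools`; the credential setup and the subscription-id
# placeholder are likewise assumptions to adjust for your environment.
import asyncio
from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.synapse.aio import SynapseManagementClient

async def show_kusto_pool_skus():
    async with DefaultAzureCredential() as credential:
        async with SynapseManagementClient(credential, "<subscription-id>") as client:
            # list_skus returns an AsyncItemPaged, consumed with `async for`
            async for sku in client.kusto_pools.list_skus():
                print(sku)

asyncio.run(show_kusto_pool_skus())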
@distributed_trace_async
async def check_name_availability(
self,
location: str,
kusto_pool_name: "_models.KustoPoolCheckNameRequest",
**kwargs: Any
) -> "_models.CheckNameResult":
"""Checks that the kusto pool name is valid and is not already in use.
:param location: The name of Azure region.
:type location: str
:param kusto_pool_name: The name of the cluster.
:type kusto_pool_name: ~azure.mgmt.synapse.models.KustoPoolCheckNameRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:return: CheckNameResult, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.CheckNameResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.CheckNameResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(kusto_pool_name, 'KustoPoolCheckNameRequest')
request = build_check_name_availability_request(
subscription_id=self._config.subscription_id,
location=location,
content_type=content_type,
json=_json,
template_url=self.check_name_availability.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('CheckNameResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
check_name_availability.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Synapse/locations/{location}/kustoPoolCheckNameAvailability'} # type: ignore
@distributed_trace_async
async def list_by_workspace(
self,
resource_group_name: str,
workspace_name: str,
**kwargs: Any
) -> "_models.KustoPoolListResult":
"""List Kusto pools.
List all Kusto pools.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param workspace_name: The name of the workspace.
:type workspace_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KustoPoolListResult, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.KustoPoolListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPoolListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_list_by_workspace_request(
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
workspace_name=workspace_name,
template_url=self.list_by_workspace.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('KustoPoolListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_by_workspace.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools'} # type: ignore
@distributed_trace_async
async def get(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> "_models.KustoPool":
"""Gets a Kusto pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: KustoPool, or the result of cls(response)
:rtype: ~azure.mgmt.synapse.models.KustoPool
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('KustoPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
async def _create_or_update_initial(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
parameters: "_models.KustoPool",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> "_models.KustoPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KustoPool')
request = build_create_or_update_request_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
if_match=if_match,
if_none_match=if_none_match,
template_url=self._create_or_update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('KustoPool', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('KustoPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
@distributed_trace_async
async def begin_create_or_update(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
parameters: "_models.KustoPool",
if_match: Optional[str] = None,
if_none_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.KustoPool"]:
"""Create or update a Kusto pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param parameters: The Kusto pool parameters supplied to the CreateOrUpdate operation.
:type parameters: ~azure.mgmt.synapse.models.KustoPool
:param if_match: The ETag of the Kusto Pool. Omit this value to always overwrite the current
Kusto Pool. Specify the last-seen ETag value to prevent accidentally overwriting concurrent
changes.
:type if_match: str
:param if_none_match: Set to '*' to allow a new Kusto Pool to be created, but to prevent
updating an existing Kusto Pool. Other values will result in a 412 Pre-condition Failed
response.
:type if_none_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either KustoPool or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.KustoPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
parameters=parameters,
if_match=if_match,
if_none_match=if_none_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('KustoPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
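# Hedged sketch of the long-running-operation pattern used by begin_create_or_update:
# awaiting the call yields an AsyncLROPoller, and awaiting poller.result() blocks until
# the Kusto pool reaches a terminal provisioning state. `client` is assumed to be an
# authenticated azure.mgmt.synapse.aio.SynapseManagementClient and `pool_definition`
# a ~azure.mgmt.synapse.models.KustoPool built by the caller.
async def create_kusto_pool(client, pool_definition):
    poller = await client.kusto_pools.begin_create_or_update(
        workspace_name="<workspace>",
        resource_group_name="<resource-group>",
        kusto_pool_name="<pool-name>",
        parameters=pool_definition,
    )
    return await poller.result()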
async def _update_initial(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
parameters: "_models.KustoPoolUpdate",
if_match: Optional[str] = None,
**kwargs: Any
) -> "_models.KustoPool":
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPool"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'KustoPoolUpdate')
request = build_update_request_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
if_match=if_match,
template_url=self._update_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('KustoPool', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('KustoPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
@distributed_trace_async
async def begin_update(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
parameters: "_models.KustoPoolUpdate",
if_match: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.KustoPool"]:
"""Update a Kusto Kusto Pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param parameters: The Kusto pool parameters supplied to the Update operation.
:type parameters: ~azure.mgmt.synapse.models.KustoPoolUpdate
:param if_match: The ETag of the Kusto Pool. Omit this value to always overwrite the current
Kusto Pool. Specify the last-seen ETag value to prevent accidentally overwriting concurrent
changes.
:type if_match: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either KustoPool or the result of
cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.synapse.models.KustoPool]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.KustoPool"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
parameters=parameters,
if_match=if_match,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
response = pipeline_response.http_response
deserialized = self._deserialize('KustoPool', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
async def _delete_initial(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
workspace_name: str,
resource_group_name: str,
kusto_pool_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes a Kusto pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
workspace_name=workspace_name,
resource_group_name=resource_group_name,
kusto_pool_name=kusto_pool_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}'} # type: ignore
async def _stop_initial(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_stop_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self._stop_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/stop'} # type: ignore
@distributed_trace_async
async def begin_stop(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops a Kusto pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/stop'} # type: ignore
async def _start_initial(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/start'} # type: ignore
@distributed_trace_async
async def begin_start(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts a Kusto pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
resource_group_name=resource_group_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/start'} # type: ignore
@distributed_trace
def list_skus_by_resource(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ListResourceSkusResult"]:
"""Returns the SKUs available for the provided resource.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ListResourceSkusResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.ListResourceSkusResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ListResourceSkusResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_skus_by_resource_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_skus_by_resource.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_skus_by_resource_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("ListResourceSkusResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_skus_by_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/skus'} # type: ignore
@distributed_trace
def list_language_extensions(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.LanguageExtensionsList"]:
"""Returns a list of language extensions that can run within KQL queries.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either LanguageExtensionsList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.LanguageExtensionsList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.LanguageExtensionsList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_language_extensions_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_language_extensions.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_language_extensions_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("LanguageExtensionsList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_language_extensions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/listLanguageExtensions'} # type: ignore
async def _add_language_extensions_initial(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
language_extensions_to_add: "_models.LanguageExtensionsList",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(language_extensions_to_add, 'LanguageExtensionsList')
request = build_add_language_extensions_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
content_type=content_type,
json=_json,
template_url=self._add_language_extensions_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_add_language_extensions_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/addLanguageExtensions'} # type: ignore
@distributed_trace_async
async def begin_add_language_extensions(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
language_extensions_to_add: "_models.LanguageExtensionsList",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Add a list of language extensions that can run within KQL queries.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param language_extensions_to_add: The language extensions to add.
:type language_extensions_to_add: ~azure.mgmt.synapse.models.LanguageExtensionsList
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._add_language_extensions_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
resource_group_name=resource_group_name,
language_extensions_to_add=language_extensions_to_add,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_add_language_extensions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/addLanguageExtensions'} # type: ignore
async def _remove_language_extensions_initial(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
language_extensions_to_remove: "_models.LanguageExtensionsList",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(language_extensions_to_remove, 'LanguageExtensionsList')
request = build_remove_language_extensions_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
content_type=content_type,
json=_json,
template_url=self._remove_language_extensions_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_remove_language_extensions_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/removeLanguageExtensions'} # type: ignore
@distributed_trace_async
async def begin_remove_language_extensions(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
language_extensions_to_remove: "_models.LanguageExtensionsList",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Remove a list of language extensions that can run within KQL queries.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param language_extensions_to_remove: The language extensions to remove.
:type language_extensions_to_remove: ~azure.mgmt.synapse.models.LanguageExtensionsList
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._remove_language_extensions_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
resource_group_name=resource_group_name,
language_extensions_to_remove=language_extensions_to_remove,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_remove_language_extensions.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/removeLanguageExtensions'} # type: ignore
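    # Illustrative usage sketch (comment only, not part of the generated client).
    # The "kusto_pools" attribute name below is an assumption about how the Synapse
    # management client exposes this operations group; adjust it to your client.
    #
    #     poller = await client.kusto_pools.begin_remove_language_extensions(
    #         workspace_name="my-workspace",
    #         kusto_pool_name="my-pool",
    #         resource_group_name="my-rg",
    #         language_extensions_to_remove=extensions,  # a models.LanguageExtensionsList
    #     )
    #     await poller.result()  # waits for the long-running operation to finish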
@distributed_trace
def list_follower_databases(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.FollowerDatabaseListResult"]:
"""Returns a list of databases that are owned by this Kusto Pool and were followed by another
Kusto Pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FollowerDatabaseListResult or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.synapse.models.FollowerDatabaseListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FollowerDatabaseListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_follower_databases_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=self.list_follower_databases.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_follower_databases_request(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("FollowerDatabaseListResult", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_follower_databases.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/listFollowerDatabases'} # type: ignore
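    # Illustrative usage sketch (comment only). list_follower_databases returns an
    # AsyncItemPaged rather than an awaitable, so it is consumed with "async for";
    # the "kusto_pools" attribute name is again an assumption about the client.
    #
    #     async for follower_db in client.kusto_pools.list_follower_databases(
    #             workspace_name="my-workspace",
    #             kusto_pool_name="my-pool",
    #             resource_group_name="my-rg"):
    #         print(follower_db)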
async def _detach_follower_databases_initial(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
follower_database_to_remove: "_models.FollowerDatabaseDefinition",
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(follower_database_to_remove, 'FollowerDatabaseDefinition')
request = build_detach_follower_databases_request_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
content_type=content_type,
json=_json,
template_url=self._detach_follower_databases_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_detach_follower_databases_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/detachFollowerDatabases'} # type: ignore
@distributed_trace_async
async def begin_detach_follower_databases(
self,
workspace_name: str,
kusto_pool_name: str,
resource_group_name: str,
follower_database_to_remove: "_models.FollowerDatabaseDefinition",
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Detaches all followers of a database owned by this Kusto Pool.
:param workspace_name: The name of the workspace.
:type workspace_name: str
:param kusto_pool_name: The name of the Kusto pool.
:type kusto_pool_name: str
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param follower_database_to_remove: The follower databases properties to remove.
:type follower_database_to_remove: ~azure.mgmt.synapse.models.FollowerDatabaseDefinition
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._detach_follower_databases_initial(
workspace_name=workspace_name,
kusto_pool_name=kusto_pool_name,
resource_group_name=resource_group_name,
follower_database_to_remove=follower_database_to_remove,
content_type=content_type,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_detach_follower_databases.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Synapse/workspaces/{workspaceName}/kustoPools/{kustoPoolName}/detachFollowerDatabases'} # type: ignore
|
py | 1a49242caf687db3bda16d0beed9327910d77878 | import numpy as np # linear algebra
import skimage.io
import os
import sys
np.random.seed(1234)
import scipy.misc
import skimage.morphology as mph
from skimage import color
dd = sys.argv[1]
STAGE1_TRAIN = "../inputs/"+dd
STAGE1_TRAIN_IMAGE_PATTERN = "%s/{}/images/{}.png" % STAGE1_TRAIN
STAGE1_TRAIN_MASK_PATTERN = "%s/{}/masks/*.png" % STAGE1_TRAIN
# Get image names
def image_ids_in(root_dir, ignore=['.DS_Store', 'summary.csv', 'stage1_train_labels.csv', 'vsamples.csv', 'stage1_solution.csv', 'samples.csv']):
ids = []
for id in os.listdir(root_dir):
if id in ignore:
print('Skipping ID:', id)
else:
ids.append(id)
return ids
# read in images
def read_image(image_id, space="rgb"):
print(image_id)
image_file = STAGE1_TRAIN_IMAGE_PATTERN.format(image_id, image_id)
image = skimage.io.imread(image_file)
# Drop alpha which is not used
image = image[:, :, :3]
if space == "hsv":
image = skimage.color.rgb2hsv(image)
return image
# Get image width, height and combine masks available.
def read_image_labels(image_id, space="rgb"):
image = read_image(image_id, space = space)
mask_file = STAGE1_TRAIN_MASK_PATTERN.format(image_id)
masks = skimage.io.imread_collection(mask_file).concatenate()
mkk = []
for i in masks:
mask = i/255
selem = mph.disk(1)
mask = mph.erosion(mask, selem)
mkk.append(mask)
mkk = np.asarray(mkk)
height, width, _ = image.shape
num_masks = masks.shape[0]
labels = np.zeros((height, width), np.uint16)
for index in range(0, num_masks):
labels[mkk[index] > 0] = 1
try:
os.mkdir(STAGE1_TRAIN+'/'+image_id+'/label')
except:
pass
scipy.misc.imsave(STAGE1_TRAIN+'/'+image_id+'/label/ER_Combined.png', labels)
return labels
train_image_ids = image_ids_in(STAGE1_TRAIN)
for im in train_image_ids:
read_image_labels(im)
|
py | 1a49250d6b14bb5542c0f559d76e77bdfd963e12 | # @Time : 12/07/21 1:05 PM
# @Author : Fabrice Harel-Canada
# @File : rick_and_morty_stories.py
import torch
from transformers import pipeline, set_seed
from transformers.pipelines import TextGenerationPipeline
class RickAndMortyStories:
def __init__(self, mask_bad_words=True):
self.pipeline = pipeline("text-generation", model="e-tony/gpt2-rnm")
if self.pipeline.tokenizer.pad_token is None:
self.pipeline.tokenizer.pad_token = self.pipeline.tokenizer.eos_token
self.pipeline.model.config.pad_token_id = self.pipeline.model.config.eos_token_id
self.mask_bad_words = mask_bad_words
self.bad_words = self.load_bad_words()
def load_bad_words(self):
import urllib
bad_words = []
try:
file = urllib.request.urlopen(
"https://raw.githubusercontent.com/RobertJGabriel/Google-profanity-words/master/list.txt"
)
for line in file:
dline = line.decode("utf-8")
bad_words.append(dline.split("\n")[0])
except:
print("Failed to load bad words list.")
return bad_words
def tokens2text(self, tokens):
return self.pipeline.tokenizer.decode(tokens)
def generate(self, inputs, max_length=250):
outputs = self.pipeline(
inputs,
do_sample=True,
max_length=len(inputs) + max_length,
top_k=50,
top_p=0.95,
num_return_sequences=1,
)
output_text = self._mask_bad_words(outputs[0]["generated_text"])
return output_text
def _mask_bad_words(self, text):
explicit = False
res_text = text.lower()
for word in self.bad_words:
if word in res_text:
print(word)
res_text = res_text.replace(word, word[0] + "*" * len(word[1:]))
explicit = True
if explicit:
output_text = ""
for oword, rword in zip(text.split(" "), res_text.split(" ")):
if oword.lower() == rword:
output_text += oword + " "
else:
output_text += rword + " "
text = output_text
return text
if __name__ == "__main__":
rm_story_generator = RickAndMortyStories()
STARTERS = {
0: "Rick: Morty, quick! Get in the car!\nMorty: Oh no, I can't do it Rick! Please not this again.\nRick: You don't have a choice! The crystal demons are going to eat you if you don't get in!",
1: "Elon: Oh, you think you're all that Rick? Fight me in a game of space squash!\nRick: Let's go, you wanna-be genius!\nElon: SpaceX fleet, line up!",
2: "Morty: I love Jessica, I want us to get married on Octopulon 300 and have octopus babies.\nRick: Shut up, Morty! You're not going to Octopulon 300!",
3: "Rick: Hey there, Jerry! What a nice day for taking these anti-gravity shoes for a spin!\nJerry: Wow, Rick! You would let me try out one of your crazy gadgets?\nRick: Of course, Jerry! That's how much I respect you.",
4: "Rick: Come on, flip the pickle, Morty. You're not gonna regret it. The payoff is huge.",
5: "Rick: I turned myself into a pickle, Morty! Boom! Big reveal - I'm a pickle. What do you think about that? I turned myself into a pickle!",
6: "Rick: Come on, flip the pickle, Morty. You're not gonna regret it. The payoff is huge.\nMorty: What? Where are you?\nRick: Morty, just do it! [laughing] Just flip the pickle!",
}
for i, starter_text in STARTERS.items():
print("starter_text:", starter_text)
        # generate() already unwraps the pipeline output and returns the masked story text
        output_text = rm_story_generator.generate(starter_text)
        print(output_text)
|
py | 1a4925d7d482faaa22a796e6f26a7d71368b66be | # NASBench 301 stuff here
import sys
from pathlib import Path
sys.path.append('./darts/cnn')
lib_dir = (Path(__file__).parent / 'darts' / 'cnn').resolve()
if str(lib_dir) not in sys.path: sys.path.insert(0, str(lib_dir))
import genotypes
from model_search import Network, NetworkNB
import utils
import time
import math
import copy
import random
import logging
import os
import gc
import numpy as np
import torch
from torch.autograd import Variable
import torchvision.datasets as dset
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
from typing import *
from collections import namedtuple
Genotype_tuple = namedtuple('Genotype_tuple', 'normal normal_concat reduce reduce_concat')
class Genotype:
def __init__(self, normal, normal_concat, reduce, reduce_concat) -> None:
self.normal = normal
self.normal_concat = normal_concat
self.reduce = reduce
self.reduce_concat = reduce_concat
self.genotype_tuple = Genotype_tuple(normal, normal_concat, reduce, reduce_concat)
def tostr(self):
return str(self.genotype_tuple)
def __hash__(self):
return hash(str(self.genotype_tuple))
def __repr__(self):
return str(self.genotype_tuple)
def __getitem__(self, k):
return getattr(self, k)
def get_DARTS_randomNAS(discrete=True, layers=8):
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
model = NetworkNB(C=16, num_classes=10, layers=layers, criterion=criterion, multiplier=4, stem_multiplier=3, discrete=discrete)
print(f"Instantiated DARTS model with discrete={discrete}")
model = model.cuda()
return model
class AttrDict(dict):
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__ = self
class DartsWrapper:
def __init__(self, save_path, seed, batch_size, grad_clip, epochs, resume_iter=None, init_channels=16):
args = {}
args['data'] = r'C:\Users\miros\Documents\Oxford\thesis\liamcli_darts\darts\data'
args['epochs'] = epochs
args['learning_rate'] = 0.025
args['batch_size'] = batch_size
args['learning_rate_min'] = 0.001
args['momentum'] = 0.9
args['weight_decay'] = 3e-4
args['init_channels'] = init_channels
args['layers'] = 8
args['drop_path_prob'] = 0.3
args['grad_clip'] = grad_clip
args['train_portion'] = 0.5
args['seed'] = seed
args['log_interval'] = 50
args['save'] = save_path
args['gpu'] = 0
args['cuda'] = True
args['cutout'] = False
args['cutout_length'] = 16
args['report_freq'] = 50
args = AttrDict(args)
self.args = args
self.seed = seed
np.random.seed(args.seed)
random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = False
cudnn.enabled=True
cudnn.deterministic=True
torch.cuda.manual_seed_all(args.seed)
train_transform, valid_transform = utils._data_transforms_cifar10(args)
train_data = dset.CIFAR10(root=args.data, train=True, download=True, transform=train_transform)
num_train = len(train_data)
indices = list(range(num_train))
split = int(np.floor(args.train_portion * num_train))
self.train_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[:split]),
pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(args.seed))
self.valid_queue = torch.utils.data.DataLoader(
train_data, batch_size=args.batch_size,
sampler=torch.utils.data.sampler.SubsetRandomSampler(indices[split:num_train]),
pin_memory=True, num_workers=0, worker_init_fn=np.random.seed(args.seed))
self.train_iter = iter(self.train_queue)
self.valid_iter = iter(self.valid_queue)
self.steps = 0
self.epochs = 0
self.total_loss = 0
self.start_time = time.time()
criterion = nn.CrossEntropyLoss()
criterion = criterion.cuda()
self.criterion = criterion
model = Network(args.init_channels, 10, args.layers, self.criterion)
model = model.cuda()
self.model = model
try:
self.load()
logging.info('loaded previously saved weights')
except Exception as e:
print(e)
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
optimizer = torch.optim.SGD(
self.model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
self.optimizer = optimizer
self.scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
if resume_iter is not None:
self.steps = resume_iter
self.epochs = int(resume_iter / len(self.train_queue))
logging.info("Resuming from epoch %d" % self.epochs)
self.objs = utils.AvgrageMeter()
self.top1 = utils.AvgrageMeter()
self.top5 = utils.AvgrageMeter()
for i in range(self.epochs):
self.scheduler.step()
size = 0
for p in model.parameters():
size += p.nelement()
logging.info('param size: {}'.format(size))
total_params = sum(x.data.nelement() for x in model.parameters())
logging.info('Args: {}'.format(args))
logging.info('Model total parameters: {}'.format(total_params))
def train_batch(self, arch):
args = self.args
if self.steps % len(self.train_queue) == 0:
self.scheduler.step()
self.objs = utils.AvgrageMeter()
self.top1 = utils.AvgrageMeter()
self.top5 = utils.AvgrageMeter()
lr = self.scheduler.get_lr()[0]
weights = self.get_weights_from_arch(arch)
self.set_model_weights(weights)
step = self.steps % len(self.train_queue)
input, target = next(self.train_iter)
self.model.train()
n = input.size(0)
input = Variable(input, requires_grad=False).cuda()
target = Variable(target, requires_grad=False).cuda()
# get a random minibatch from the search queue with replacement
self.optimizer.zero_grad()
logits = self.model(input, discrete=True)
if type(logits) is tuple:
_, logits = logits
loss = self.criterion(logits, target)
loss.backward()
        nn.utils.clip_grad_norm_(self.model.parameters(), args.grad_clip)
self.optimizer.step()
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
self.objs.update(loss.item(), n)
self.top1.update(prec1.item(), n)
self.top5.update(prec5.item(), n)
if step % args.report_freq == 0:
logging.info('train %03d %e %f %f', step, self.objs.avg, self.top1.avg, self.top5.avg)
self.steps += 1
if self.steps % len(self.train_queue) == 0:
self.epochs += 1
self.train_iter = iter(self.train_queue)
valid_err = self.evaluate(arch)
logging.info('epoch %d | train_acc %f | valid_acc %f' % (self.epochs, self.top1.avg, 1-valid_err))
self.save()
def evaluate(self, arch, split=None):
# Return error since we want to minimize obj val
logging.info(arch)
objs = utils.AvgrageMeter()
top1 = utils.AvgrageMeter()
top5 = utils.AvgrageMeter()
weights = self.get_weights_from_arch(arch)
self.set_model_weights(weights)
self.model.eval()
if split is None:
n_batches = 10
else:
n_batches = len(self.valid_queue)
for step in range(n_batches):
try:
input, target = next(self.valid_iter)
except Exception as e:
logging.info('looping back over valid set')
self.valid_iter = iter(self.valid_queue)
input, target = next(self.valid_iter)
with torch.no_grad():
input = Variable(input).cuda()
target = Variable(target).cuda()
logits = self.model(input, discrete=True)
if type(logits) is tuple:
_, logits = logits
loss = self.criterion(logits, target)
prec1, prec5 = utils.accuracy(logits, target, topk=(1, 5))
n = input.size(0)
objs.update(loss.item(), n)
top1.update(prec1.item(), n)
top5.update(prec5.item(), n)
if step % self.args.report_freq == 0:
logging.info('valid %03d %e %f %f', step, objs.avg, top1.avg, top5.avg)
return 1-top1.avg
def save(self):
utils.save(self.model, os.path.join(self.args.save, 'weights.pt'))
def load(self):
utils.load(self.model, os.path.join(self.args.save, 'weights.pt'))
def get_weights_from_arch(self, arch):
k = sum(1 for i in range(self.model._steps) for n in range(2+i))
num_ops = len(genotypes.PRIMITIVES)
n_nodes = self.model._steps
alphas_normal = Variable(torch.zeros(k, num_ops).cuda(), requires_grad=False)
alphas_reduce = Variable(torch.zeros(k, num_ops).cuda(), requires_grad=False)
offset = 0
for i in range(n_nodes):
normal1 = arch[0][2*i]
normal2 = arch[0][2*i+1]
reduce1 = arch[1][2*i]
reduce2 = arch[1][2*i+1]
alphas_normal[offset+normal1[0], normal1[1]] = 1
alphas_normal[offset+normal2[0], normal2[1]] = 1
alphas_reduce[offset+reduce1[0], reduce1[1]] = 1
alphas_reduce[offset+reduce2[0], reduce2[1]] = 1
offset += (i+2)
arch_parameters = [
alphas_normal,
alphas_reduce,
]
return arch_parameters
def set_model_weights(self, weights):
self.model.alphas_normal = weights[0]
self.model.alphas_reduce = weights[1]
self.model.arch_normal_parameters = weights[0]
self.model.arch_reduce_parameters = weights[1]
self.model._arch_parameters = [self.model.alphas_normal, self.model.alphas_reduce]
self.model.dynamic_cell = Genotype(normal=self.model.alphas_normal, reduce = self.model.alphas_reduce, normal_concat=[2,3,4,5], reduce_concat=[2,3,4,5])
def sample_arch(self):
k = sum(1 for i in range(self.model._steps) for n in range(2+i))
num_ops = len(genotypes.PRIMITIVES)
n_nodes = self.model._steps
normal = []
reduction = []
for i in range(n_nodes):
ops = np.random.choice(range(num_ops), 4)
nodes_in_normal = np.random.choice(range(i+2), 2, replace=False)
nodes_in_reduce = np.random.choice(range(i+2), 2, replace=False)
normal.extend([(nodes_in_normal[0], ops[0]), (nodes_in_normal[1], ops[1])])
reduction.extend([(nodes_in_reduce[0], ops[2]), (nodes_in_reduce[1], ops[3])])
return (normal, reduction)
def perturb_arch(self, arch):
new_arch = copy.deepcopy(arch)
num_ops = len(genotypes.PRIMITIVES)
cell_ind = np.random.choice(2)
step_ind = np.random.choice(self.model._steps)
nodes_in = np.random.choice(step_ind+2, 2, replace=False)
ops = np.random.choice(range(num_ops), 2)
new_arch[cell_ind][2*step_ind] = (nodes_in[0], ops[0])
new_arch[cell_ind][2*step_ind+1] = (nodes_in[1], ops[1])
return new_arch
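if __name__ == '__main__':
    # Minimal usage sketch, not part of the original file: it assumes a CUDA device
    # and that the CIFAR-10 path hard-coded in args['data'] above actually exists.
    wrapper = DartsWrapper(save_path='./darts_run', seed=0, batch_size=64,
                           grad_clip=5, epochs=50)
    arch = wrapper.sample_arch()   # random (normal, reduction) cell encoding
    wrapper.train_batch(arch)      # one SGD step with the sampled architecture
    print('validation error:', wrapper.evaluate(arch))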
|
py | 1a49270b4e8c26e6630601352e341c1304a338ed | # -*- coding: utf-8 -*-
"""DNACenterAPI non_fabric_wireless API fixtures and tests.
Copyright (c) 2019-2020 Cisco and/or its affiliates.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import pytest
from tests.environment import DNA_CENTER_VERSION
pytestmark = pytest.mark.skipif(DNA_CENTER_VERSION != '1.2.10', reason='version does not match')
def is_valid_delete_and_provision_ssid(json_schema_validate, obj):
json_schema_validate('jsd_cca098344a489dfa_v1_2_10').validate(obj)
return True
def delete_and_provision_ssid(api):
endpoint_result = api.non_fabric_wireless.delete_and_provision_ssid(
managed_aplocations='string',
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_and_provision_ssid(api, validator):
assert is_valid_delete_and_provision_ssid(
validator,
delete_and_provision_ssid(api)
)
def delete_and_provision_ssid_default(api):
endpoint_result = api.non_fabric_wireless.delete_and_provision_ssid(
managed_aplocations='string',
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_and_provision_ssid_default(api, validator):
try:
assert is_valid_delete_and_provision_ssid(
validator,
delete_and_provision_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_enterprise_ssid(json_schema_validate, obj):
return True if obj else False
def create_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.create_enterprise_ssid(
active_validation=True,
enableBroadcastSSID=True,
enableFastLane=True,
enableMACFiltering=True,
fastTransition='Adaptive',
name='********************************',
passphrase='********',
payload=None,
radioPolicy='Dual band operation (2.4GHz and 5GHz)',
securityLevel='WPA2_ENTERPRISE',
trafficType='voicedata'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_enterprise_ssid(api, validator):
assert is_valid_create_enterprise_ssid(
validator,
create_enterprise_ssid(api)
)
def create_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.create_enterprise_ssid(
active_validation=True,
enableBroadcastSSID=None,
enableFastLane=None,
enableMACFiltering=None,
fastTransition=None,
name=None,
passphrase=None,
payload=None,
radioPolicy=None,
securityLevel=None,
trafficType=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_enterprise_ssid_default(api, validator):
try:
assert is_valid_create_enterprise_ssid(
validator,
create_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_create_and_provision_ssid(json_schema_validate, obj):
json_schema_validate('jsd_db9f997f4e59aec1_v1_2_10').validate(obj)
return True
def create_and_provision_ssid(api):
endpoint_result = api.non_fabric_wireless.create_and_provision_ssid(
active_validation=True,
enableFabric=True,
flexConnect={'enableFlexConnect': True, 'localToVlan': 0},
managedAPLocations=['string'],
payload=None,
ssidDetails={'name': 'string', 'securityLevel': 'WPA2_ENTERPRISE', 'enableFastLane': True, 'passphrase': 'string', 'trafficType': 'data', 'enableBroadcastSSID': True, 'radioPolicy': 'Dual band operation (2.4GHz and 5GHz)', 'enableMACFiltering': True, 'fastTransition': 'Adaptive', 'webAuthURL': 'string'},
ssidType='Guest',
vlanAndDynamicInterfaceDetails={'managedAPLocation': {'interfaceIPAddress': 'string', 'interfaceNetmaskInCIDR': 0, 'interfaceGateway': 'string', 'lagOrPortNumber': 0}, 'vlanId': 0, 'vlanName': 'string'}
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_and_provision_ssid(api, validator):
assert is_valid_create_and_provision_ssid(
validator,
create_and_provision_ssid(api)
)
def create_and_provision_ssid_default(api):
endpoint_result = api.non_fabric_wireless.create_and_provision_ssid(
active_validation=True,
enableFabric=None,
flexConnect=None,
managedAPLocations=None,
payload=None,
ssidDetails=None,
ssidType=None,
vlanAndDynamicInterfaceDetails=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_create_and_provision_ssid_default(api, validator):
try:
assert is_valid_create_and_provision_ssid(
validator,
create_and_provision_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_delete_enterprise_ssid(json_schema_validate, obj):
json_schema_validate('jsd_c7a6592b4b98a369_v1_2_10').validate(obj)
return True
def delete_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.delete_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_enterprise_ssid(api, validator):
assert is_valid_delete_enterprise_ssid(
validator,
delete_enterprise_ssid(api)
)
def delete_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.delete_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_delete_enterprise_ssid_default(api, validator):
try:
assert is_valid_delete_enterprise_ssid(
validator,
delete_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
def is_valid_get_enterprise_ssid(json_schema_validate, obj):
json_schema_validate('jsd_cca519ba45ebb423_v1_2_10').validate(obj)
return True
def get_enterprise_ssid(api):
endpoint_result = api.non_fabric_wireless.get_enterprise_ssid(
ssid_name='string'
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_get_enterprise_ssid(api, validator):
assert is_valid_get_enterprise_ssid(
validator,
get_enterprise_ssid(api)
)
def get_enterprise_ssid_default(api):
endpoint_result = api.non_fabric_wireless.get_enterprise_ssid(
ssid_name=None
)
return endpoint_result
@pytest.mark.non_fabric_wireless
def test_get_enterprise_ssid_default(api, validator):
try:
assert is_valid_get_enterprise_ssid(
validator,
get_enterprise_ssid_default(api)
)
except Exception as original_e:
with pytest.raises(TypeError, match="but instead we received None"):
raise original_e
|
py | 1a492817954c371f03888b0c2df13cbe92c25ec2 | # coding=utf-8
from __future__ import absolute_import, print_function
import os
import numpy as np
from suanpan.app import app
from suanpan.storage import storage
from suanpan.utils import image
from arguments import Images
@app.input(Images(key="inputImage"))
@app.output(Images(key="outputImage"))
def SPRemoveWatermark(context):
args = context.args
images = args.inputImage
alpha = 2.0
beta = -160
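    # alpha/beta implement a linear contrast/brightness adjustment: every pixel is
    # mapped to alpha * pixel + beta, then clipped back into the valid 0-255 range.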
for i, img in enumerate(images):
new = alpha * img + beta
new = np.clip(new, 0, 255).astype(np.uint8)
image.save(
os.path.join(
args.outputImage,
storage.delimiter.join(images.images[i].split(storage.delimiter)[8:]),
),
new,
)
return args.outputImage
if __name__ == "__main__":
SPRemoveWatermark()
|
py | 1a49285b8a429ccfd6d07b8fc283699f2089fa37 | #!/usr/bin/env python3
"""
Prompt:
Loop through all numbers from 1 to 100. If the number is divisible by 3, print
out "Fizz" instead. If the number is divisible by 5, print out "Buzz" instead.
"""
from typing import Iterable
from typing import Union
def fizz_buzz(n: int) -> Iterable[Union[int, str]]:
    for i in range(1, n + 1):
if i%3 == 0: yield 'Fizz'
elif i%5 == 0: yield 'Buzz'
else: yield i
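# Illustrative expected output (given the 1..n range above):
#   list(fizz_buzz(6)) -> [1, 2, 'Fizz', 4, 'Buzz', 'Fizz']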
if __name__ == '__main__':
print(list(fizz_buzz(17)))
|
py | 1a49290e5030729b4f7e055d04a116961252c40e | from gi.repository import Gtk, Gdk
css = """
#top GtkComboBox {
background-color: #000000;
}
GtkWindow {
color: black;
background: black;
background-color: black;
}
GtkComboBox {
color: black;
background: black;
background-color: black;
}
"""
class ComboBoxWindow(Gtk.Window):
def __init__(self):
style_provider = Gtk.CssProvider()
        style_provider.load_from_data(css.encode())  # load_from_data expects bytes under Python 3
Gtk.StyleContext.add_provider_for_screen(
Gdk.Screen.get_default(),
style_provider,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
)
Gtk.Window.__init__(self, title="ComboBox Example")
self.set_border_width(10)
name_store = Gtk.ListStore(int, str)
name_store.append([1, "Billy Bob"])
name_store.append([11, "Billy Bob Junior"])
name_store.append([12, "Sue Bob"])
name_store.append([2, "Joey Jojo"])
name_store.append([3, "Rob McRoberts"])
name_store.append([31, "Xavier McRoberts"])
vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=6)
name_combo = Gtk.ComboBox.new_with_model_and_entry(name_store)
name_combo.connect("changed", self.on_name_combo_changed)
name_combo.set_entry_text_column(1)
vbox.pack_start(name_combo, False, False, 0)
country_store = Gtk.ListStore(str)
countries = ["Austria", "Brazil", "Belgium", "France", "Germany",
"Switzerland", "United Kingdom", "United States of America",
"Uruguay"]
for country in countries:
country_store.append([country])
country_combo = Gtk.ComboBox.new_with_model(country_store)
country_combo.connect("changed", self.on_country_combo_changed)
renderer_text = Gtk.CellRendererText()
country_combo.pack_start(renderer_text, True)
country_combo.add_attribute(renderer_text, "text", 0)
        vbox.pack_start(country_combo, False, False, 0)
currencies = ["Euro", "US Dollars", "British Pound", "Japanese Yen",
"Russian Ruble", "Mexican peso", "Swiss franc"]
currency_combo = Gtk.ComboBoxText()
currency_combo.set_entry_text_column(0)
currency_combo.connect("changed", self.on_currency_combo_changed)
for currency in currencies:
currency_combo.append_text(currency)
vbox.pack_start(currency_combo, False, False, 0)
self.add(vbox)
def on_name_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter != None:
model = combo.get_model()
row_id, name = model[tree_iter][:2]
print("Selected: ID=%d, name=%s" % (row_id, name))
else:
entry = combo.get_child()
print("Entered: %s" % entry.get_text())
def on_country_combo_changed(self, combo):
tree_iter = combo.get_active_iter()
if tree_iter != None:
model = combo.get_model()
country = model[tree_iter][0]
print("Selected: country=%s" % country)
def on_currency_combo_changed(self, combo):
text = combo.get_active_text()
if text != None:
print("Selected: currency=%s" % text)
win = ComboBoxWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main() |
py | 1a4929791466add09dd734c8bb7bdebe25764065 | from django.db import models
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
bio = models.TextField(max_length=500, blank=True)
birth_date = models.DateField(null=True, blank=True)
|
py | 1a492ab233df27d2aced22d90f0b9a18f6fa2557 | # Generated by Django 3.0.8 on 2020-08-12 21:02
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("main", "0035_auto_20200812_2020"),
]
operations = [
migrations.AddField(
model_name="comment",
name="email",
field=models.EmailField(max_length=254, null=True),
),
migrations.AddField(
model_name="comment",
name="name",
field=models.CharField(default="Anonymous", max_length=150, null=True),
),
]
|
py | 1a492bc82c5c087e82ae7ca1a783c34122c36528 | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import os
class Templight(CMakePackage):
"""Templight is a Clang-based tool to profile the time and memory
consumption of template instantiations and to perform interactive
debugging sessions to gain introspection into the template
instantiation process."""
homepage = "https://github.com/mikael-s-persson/templight"
git = "https://github.com/mikael-s-persson/templight.git"
llvm_svn = "http://llvm.org/svn/llvm-project/{0}/trunk"
family = 'compiler' # Used by lmod
# Templight is a patch to clang, so we have three versions to care about:
# - The one that will be used in Spack specifications
# - The git branch that we need to fetch from in the templight repo
# - The svn tag that we need to fetch from in the LLVM repos
version('develop', branch='master')
resource(name='llvm-trunk',
svn=llvm_svn.format('llvm'),
destination='.',
placement='llvm',
when='@develop')
resource(name='clang-trunk',
svn=llvm_svn.format('cfe'),
destination='llvm/tools',
placement='clang',
when='@develop')
# Templight has no stable release yet, and is supposed to be built against
# the LLVM trunk. As this is a brittle combination, I decided to
# artificially create stable releases based on what works today. Please
# feel free to remove these versions once templight has stabilized.
version('2019.01.09', commit='0899a4345607f1bb244cae477214f274ad2c52cc')
resource(name='llvm-r350726',
svn=llvm_svn.format('llvm'),
revision=350726,
destination='.',
placement='llvm',
when='@2019.01.09')
resource(name='clang-r350726',
svn=llvm_svn.format('cfe'),
revision=350726,
destination='llvm/tools',
placement='clang',
when='@2019.01.09')
version('2018.07.20', commit='91589f95427620dd0a2346bd69ba922f374aa42a')
resource(name='llvm-r337566',
svn=llvm_svn.format('llvm'),
revision=337566,
destination='.',
placement='llvm',
when='@2018.07.20')
resource(name='clang-r337566',
svn=llvm_svn.format('cfe'),
revision=337566,
destination='llvm/tools',
placement='clang',
when='@2018.07.20')
patch('develop-20180720.patch', when='@2018.07.20')
# Clang debug builds can be _huge_ (20+ GB), make sure you know what you
# are doing before switching to them
variant('build_type', default='Release',
description='CMake build type',
values=('Debug', 'Release', 'RelWithDebInfo', 'MinSizeRel'))
# NOTE: LLVM has many configurable tweaks and optional tools/extensions.
# I did not think that propagating all of these to a debugging and
# performance analysis tool was worth the maintenance burden. But
# if you disagree, the llvm package can be used for inspiration.
depends_on('[email protected]:', type='build')
depends_on('python')
depends_on('py-lit', type=('build', 'run'))
def patch(self):
# We start with the templight source tree and an "llvm" subdir.
# But we actually need an llvm source tree with a "templight" subdir.
# Let's flip the directory organization around
templight_files = os.listdir('.')
templight_files.remove('llvm')
templight_dir = 'llvm/tools/clang/tools/templight'
os.mkdir(templight_dir)
for name in templight_files:
os.rename(name, os.path.join(templight_dir, name))
for name in os.listdir('llvm'):
os.rename(os.path.join('llvm', name), name)
os.rmdir('llvm')
# Tell the clang build system that it needs to build templight
with open("tools/clang/tools/CMakeLists.txt", "a") as cmake_lists:
cmake_lists.write("add_clang_subdirectory(templight)")
def setup_environment(self, spack_env, run_env):
spack_env.append_flags('CXXFLAGS', self.compiler.cxx11_flag)
run_env.set('CC', join_path(self.spec.prefix.bin, 'templight'))
run_env.set('CXX', join_path(self.spec.prefix.bin, 'templight++'))
def cmake_args(self):
spec = self.spec
# Templight is a debugging tool, not a production compiler, so we only
# need a very bare-bones build of clang
#
# Minimal build config ideas were taken from the llvm package, with
# the templight-specific assumption that we will always be building
# for LLVM / Clang 5.0+ and can safely ignore older tricks.
#
cmake_args = [
'-DLLVM_REQUIRES_RTTI:BOOL=ON',
'-DCLANG_DEFAULT_OPENMP_RUNTIME:STRING=libomp',
'-DPYTHON_EXECUTABLE:PATH={0}'.format(spec['python'].command.path),
'-DLLVM_EXTERNAL_POLLY_BUILD:Bool=OFF',
'-DLLVM_TOOL_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_BUILD:Bool=OFF',
'-DLLVM_POLLY_LINK_INTO_TOOLS:Bool=OFF',
'-DLLVM_EXTERNAL_LLDB_BUILD:Bool=OFF',
'-DLLVM_TOOL_LLDB_BUILD:Bool=OFF',
'-DLLVM_TOOL_LLD_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBUNWIND_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBCXX_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_LIBCXXABI_BUILD:Bool=OFF',
'-DLLVM_EXTERNAL_COMPILER_RT_BUILD:Bool=OFF',
]
targets = ['NVPTX', 'AMDGPU']
if 'x86' in spec.architecture.target.lower():
targets.append('X86')
elif 'arm' in spec.architecture.target.lower():
targets.append('ARM')
elif 'aarch64' in spec.architecture.target.lower():
targets.append('AArch64')
elif 'sparc' in spec.architecture.target.lower():
targets.append('Sparc')
elif ('ppc' in spec.architecture.target.lower() or
'power' in spec.architecture.target.lower()):
targets.append('PowerPC')
cmake_args.append(
'-DLLVM_TARGETS_TO_BUILD:Bool=' + ';'.join(targets))
if spec.satisfies('platform=linux'):
cmake_args.append('-DCMAKE_BUILD_WITH_INSTALL_RPATH=1')
return cmake_args
@run_after('install')
def post_install(self):
with working_dir(self.build_directory):
install_tree('bin', self.prefix.libexec.llvm)
|
py | 1a492cbab36f4cb6366d851fe8ba5ac271cc76e6 | """Test for user views"""
from datetime import timedelta
import pytest
from django.urls import reverse
from rest_framework import status
from social_django.models import UserSocialAuth
from mitol.common.pytest_utils import any_instance_of
from mitol.common.utils import now_in_utc
from profiles.factories import UserFactory
from profiles.models import ChangeEmailRequest
@pytest.mark.django_db
def test_cannot_create_user(client):
"""Verify the api to create a user is nonexistent"""
resp = client.post("/api/users/", data={"name": "Name"})
assert resp.status_code == status.HTTP_404_NOT_FOUND
def test_cannot_update_user(user_client, user):
"""Verify the api to update a user is doesn't accept the verb"""
resp = user_client.patch(
reverse("users_api-detail", kwargs={"pk": user.id}), data={"name": "Name"}
)
assert resp.status_code == status.HTTP_405_METHOD_NOT_ALLOWED
def test_get_user_by_id(user_client, user):
"""Test that user can request their own user by id"""
resp = user_client.get(reverse("users_api-detail", kwargs={"pk": user.id}))
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == {
"id": user.id,
"username": user.username,
"email": user.email,
"legal_address": {
"first_name": user.legal_address.first_name,
"last_name": user.legal_address.last_name,
"street_address": [user.legal_address.street_address_1],
"city": user.legal_address.city,
"state_or_territory": user.legal_address.state_or_territory,
"country": user.legal_address.country,
"postal_code": user.legal_address.postal_code,
"is_complete": True,
},
"profile": {
"name": user.profile.name,
"gender": user.profile.gender,
"company": user.profile.company,
"company_size": user.profile.company_size,
"job_title": user.profile.job_title,
"birth_year": int(user.profile.birth_year),
"job_function": user.profile.job_function,
"years_experience": user.profile.years_experience,
"highest_education": user.profile.highest_education,
"industry": user.profile.industry,
"is_complete": True,
"updated_on": any_instance_of(str),
"can_skip_application_steps": False,
},
"is_anonymous": False,
"is_authenticated": True,
}
def test_get_user_by_me(user_client, user):
"""Test that user can request their own user by the 'me' alias"""
resp = user_client.get(reverse("users_api-me"))
assert resp.status_code == status.HTTP_200_OK
assert resp.json() == {
"id": user.id,
"username": user.username,
"email": user.email,
"legal_address": {
"first_name": user.legal_address.first_name,
"last_name": user.legal_address.last_name,
"street_address": [user.legal_address.street_address_1],
"city": user.legal_address.city,
"state_or_territory": user.legal_address.state_or_territory,
"country": user.legal_address.country,
"postal_code": user.legal_address.postal_code,
"is_complete": True,
},
"profile": {
"name": user.profile.name,
"gender": user.profile.gender,
"company": user.profile.company,
"company_size": user.profile.company_size,
"job_title": user.profile.job_title,
"birth_year": int(user.profile.birth_year),
"job_function": user.profile.job_function,
"years_experience": user.profile.years_experience,
"highest_education": user.profile.highest_education,
"industry": user.profile.industry,
"is_complete": True,
"updated_on": any_instance_of(str),
"can_skip_application_steps": False,
},
"is_anonymous": False,
"is_authenticated": True,
}
@pytest.mark.django_db
def test_countries_states_view(client):
"""Test that a list of countries and states is returned"""
resp = client.get(reverse("countries_api-list"))
countries = {country["code"]: country for country in resp.json()}
assert len(countries.get("US").get("states")) > 50
assert {"code": "CA-QC", "name": "Quebec"} in countries.get("CA").get("states")
assert len(countries.get("FR").get("states")) == 0
assert countries.get("US").get("name") == "United States"
assert countries.get("TW").get("name") == "Taiwan"
def test_create_email_change_request_invalid_password(user_drf_client, user):
"""Test that invalid password is returned"""
resp = user_drf_client.post(
"/api/change-emails/",
data={
"new_email": "[email protected]",
"password": user.password,
"old_password": "abc",
},
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
def test_create_email_change_request_existing_email(user_drf_client, user):
"""Test that create change email request gives validation error for existing user email"""
new_user = UserFactory.create()
user_password = user.password
user.set_password(user.password)
user.save()
resp = user_drf_client.post(
"/api/change-emails/",
data={"new_email": new_user.email, "password": user_password},
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
def test_create_email_change_request_same_email(user_drf_client, user):
"""Test that user same email wouldn't be processed"""
resp = user_drf_client.post(
"/api/change-emails/",
data={
"new_email": user.email,
"password": user.password,
"old_password": user.password,
},
)
assert resp.status_code == status.HTTP_400_BAD_REQUEST
def test_create_email_change_request_valid_email(user_drf_client, user, mocker):
"""Test that change request is created"""
user_password = "PaSsWoRd"
user.set_password(user_password)
user.save()
mock_email = mocker.patch("mail.v2.verification_api.send_verify_email_change_email")
resp = user_drf_client.post(
"/api/change-emails/",
data={"new_email": "[email protected]", "password": user_password},
)
assert resp.status_code == status.HTTP_201_CREATED
code = mock_email.call_args[0][1].code
assert code
old_email = user.email
resp = user_drf_client.patch(
"/api/change-emails/{}/".format(code), data={"confirmed": True}
)
assert not UserSocialAuth.objects.filter(uid=old_email, user=user).exists()
assert resp.status_code == status.HTTP_200_OK
user.refresh_from_db()
assert user.email == "[email protected]"
def test_create_email_change_request_expired_code(user_drf_client, user):
"""Check for expired code for Email Change Request"""
change_request = ChangeEmailRequest.objects.create(
user=user,
new_email="[email protected]",
expires_on=now_in_utc() - timedelta(seconds=5),
)
resp = user_drf_client.patch(
"/api/change-emails/{}/".format(change_request.code), data={"confirmed": True}
)
assert resp.status_code == status.HTTP_404_NOT_FOUND
def test_update_email_change_request_invalid_token(user_drf_client):
"""Test that invalid token doesn't work"""
resp = user_drf_client.patch("/api/change-emails/abc/", data={"confirmed": True})
assert resp.status_code == status.HTTP_404_NOT_FOUND
|
py | 1a492cc26eab16785e002863844bb4603bd8e858 | from urllib2 import urlopen
from zipfile import ZipFile
from bs4 import BeautifulSoup
import os
import sys
from MaudeMiner.settings import MAUDE_DATA_ORIGIN, DATA_PATH, ZIPS_PATH, TXTS_PATH
from MaudeMiner.utils import update_progress
def get_data_urls():
sys.stdout.write("Scraping MAUDE website for data urls: ")
sys.stdout.flush()
urls = []
soup = BeautifulSoup(str(urlopen(MAUDE_DATA_ORIGIN).read()), "html5lib")
# search page for anchor tags with links to zip files
for a in soup.findAll('a'):
if a.has_attr('href') and ".zip" in a['href']:
urls.append(a['href'])
sys.stdout.write("OK - found %i URLs\n" % len(urls))
sys.stdout.flush()
return urls
def prepare_directory():
sys.stdout.write("Preparing data directory: ")
sys.stdout.flush()
# make directories if they dont exist
if not os.path.exists(ZIPS_PATH):
os.makedirs(ZIPS_PATH)
if not os.path.exists(TXTS_PATH):
os.makedirs(TXTS_PATH)
# remove existing data files
for file in os.listdir(ZIPS_PATH):
os.remove(ZIPS_PATH+file)
for file in os.listdir(TXTS_PATH):
os.remove(TXTS_PATH+file)
sys.stdout.write("OK - %s\n" % DATA_PATH)
sys.stdout.flush()
def download_data(urls):
message = "Downloading data: "
num_complete = 0
update_progress(message, 0, len(urls))
for url in urls:
# get file name
begin = url.rfind("/")+1
zip_filename = url[begin:]
with open(ZIPS_PATH+zip_filename, 'wb') as zipfile:
zipfile.write(urlopen(url).read())
num_complete += 1
update_progress(message, num_complete, len(urls))
print "\r{0}OK - {1}".format(message, ZIPS_PATH)
return num_complete
def extract_data(num_files):
message = "Extracting data: "
num_complete = 0
update_progress(message, 0, num_files)
for file in os.listdir(ZIPS_PATH):
zip_data = ZipFile(ZIPS_PATH+file, 'r').extractall(TXTS_PATH)
num_complete += 1
update_progress(message, num_complete, num_files)
print "\r{0}OK - {1}".format(message, TXTS_PATH)
def run(args=None):
print "=== Downloading Data ==="
prepare_directory()
zip_urls = get_data_urls()
num_files = download_data(zip_urls)
extract_data(num_files)
print "Done!"
if __name__ == "__main__":
run() |
py | 1a492d985eb81de77e327e015fbbdcac8c1cb876 | from __future__ import print_function, division
import sys, os
sys.path.append(os.path.abspath("."))
from problems.problem import *
from helper.pom3 import pom3
__author__ = 'panzer'
class POM3BSansComp(Problem):
"""
POM 3B without Completion
"""
def __init__(self):
Problem.__init__(self)
self.name = POM3BSansComp.__name__
names = ["Culture", "Criticality", "Criticality Modifier", "Initial Known", "Inter-Dependency", "Dynamism",
"Size", "Plan", "Team Size"]
lows = [0.10, 0.82, 80, 0.40, 0, 1, 0, 0, 1]
ups = [0.90, 1.26, 95, 0.70, 100, 50, 2, 5, 20]
self.decisions = [Decision(names[i], lows[i], ups[i]) for i in range(len(names))]
self.objectives = [Objective("Cost", True, 0), Objective("Score", False, 0, 1),
Objective("Idle", True, 0, 1)]
def evaluate(self, decisions):
p = pom3()
output = p.simulate(decisions)
return [output[0], output[1], output[3]]
|
py | 1a492dffab361a0e6fbe11c2e335d11e4653814f | # Generated by Django 2.1.5 on 2019-01-31 13:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('wagtailimages', '0021_image_file_hash'),
('kdl_wagtail_people', '0004_peopleindexpage_peopleindexpersonrelationship'),
]
operations = [
migrations.CreateModel(
name='PersonPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('person', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='pages', to='kdl_wagtail_people.Person')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
py | 1a492e25978fc83272913fee4492a56f4a8c4faa | def dobro(num, formatado=False):
if formatado:
return f'R${num * 2:.2f}'
else:
return num * 2
def metade(num, formatado=False):
if formatado:
return f'R${num / 2:.2f}'
else:
return num / 2
def adicionar(num, index, formatado=False):
if formatado:
return f'R${num + (num / 100 * index):.2f}'
else:
return num + (num / 100 * index)
def descontar(num, index, formatado=False):
if formatado:
return f'R${num - (num / 100 * index):.2f}'
else:
return num - (num / 100 * index)
def cifrar(num):
return f'R${num:.2f}'
def resumo(valor, aumento, desconto):
print('-' * 40)
print('{:^40}'.format('RESUMO DO VALOR'))
print('¯' * 40)
print('Preço analisado:', end='')
print(f'{(cifrar(valor)):>24}')
print('Dobro do Preço:', end='')
print(f'{dobro(valor, True):>25}')
    print(f'Metade do Preço:', end='')
print(f'{metade(valor, True):>25}')
print(f'{aumento}% de aumento:', end='')
print(f'{adicionar(valor, aumento, True):>25}')
print(f'{desconto}% de desconto:', end='')
print(f'{descontar(valor, desconto, True):>24}')
print('¯' * 40)
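# Minimal usage sketch; the price and percentages below are illustrative values
# that are not part of the original module.
if __name__ == '__main__':
    preco = 49.90
    print(dobro(preco, True), metade(preco, True))
    resumo(preco, 15, 35)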
|
py | 1a492e27097666ac2decf2d40feb2eb78f339dc1 | import time
def print_board(board):
for e in board:
for f in e:
print(f, end=" ")
print()
def get_neighbour_count(board, y, x):
    # Count live neighbours with explicit bounds checks; the previous try/except let
    # negative indices silently wrap around to the opposite edge of the board.
    c = 0
    for a in range(y - 1, y + 2):
        for b in range(x - 1, x + 2):
            if a == y and b == x:
                continue
            if 0 <= a < len(board) and 0 <= b < len(board[a]):
                if board[a][b] == 1:
                    c += 1
    return c
def should_change(state, neighbours):
    # Conway's rules expressed as "does this cell flip state?": a live cell dies with
    # fewer than 2 or more than 3 neighbours; a dead cell is born with exactly 3.
    if state == 1:
        if neighbours < 2:
            return True
        if neighbours == 2 or neighbours == 3:
            return False
        if neighbours > 3:
            return True
    else:
        if neighbours == 3:
            return True
    return False
def change(board, lst):
for e in lst:
tmp = board[e[0]][e[1]]
if tmp == 1:
board[e[0]][e[1]] = 0
else:
board[e[0]][e[1]] = 1
return board
def game_over(board):
if sum(x.count(0) for x in board) == len(board)*len(board[0]):
return True
return False
if __name__ == '__main__':
board= [[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,1,0,0,0,0,0],
[0,0,0,0,0,0,1,0,0,0],
[0,0,0,0,1,0,0,1,0,0],
[0,0,0,0,1,1,0,1,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0]]
h = len(board)
w = len(board[0])
while not game_over(board):
print('--'*w)
print_board(board)
change_lst = []
for y in range(h):
for x in range(w):
n = get_neighbour_count(board,y,x)
if should_change(board[y][x],n):
change_lst.append([y,x])
board = change(board, change_lst)
time.sleep(0.5)
print_board(board) |
py | 1a492e62ee0ab25db0dfa63d6a4b2b9eb3e3e634 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
exporters
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import os
import itertools
import six
import inspect
import abc
import logging
import numpy as np
import paddle.fluid as F
import paddle.fluid.layers as L
from propeller.paddle.train import Saver
from propeller.types import InferenceSpec
from propeller.train.model import Model
from propeller.paddle.train.trainer import _build_net
from propeller.paddle.train.trainer import _build_model_fn
from propeller.types import RunMode
from propeller.types import ProgramPair
log = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class Exporter(object):
"""base exporter"""
@abc.abstractmethod
def export(self, exe, program, eval_result, state):
"""export"""
raise NotImplementedError()
class BestExporter(Exporter):
"""export saved model accordingto `cmp_fn`"""
def __init__(self, export_dir, cmp_fn):
"""doc"""
self._export_dir = export_dir
self._best = None
self.cmp_fn = cmp_fn
def export(self, exe, program, eval_model_spec, eval_result, state):
"""doc"""
log.debug('New evaluate result: %s \nold: %s' %
(repr(eval_result), repr(self._best)))
if self._best is None or self.cmp_fn(old=self._best, new=eval_result):
log.debug('[Best Exporter]: export to %s' % self._export_dir)
eval_program = program.train_program
# FIXME: all eval datasets has same name/types/shapes now!!! so every eval program are the smae
saver = Saver(
self._export_dir,
exe,
program=eval_program,
max_ckpt_to_keep=1)
saver.save(state)
self._best = eval_result
else:
log.debug('[Best Exporter]: skip step %s' % state.gstep)
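# A minimal usage sketch of BestExporter; the './best_model' path and the 'acc'
# key of the evaluation-result dict are illustrative assumptions, not part of
# this module.
def _example_best_exporter():
    def _better(old, new):
        # keep the export whose (hypothetical) accuracy metric improved
        return new['acc'] > old['acc']

    exporter = BestExporter(export_dir='./best_model', cmp_fn=_better)
    # the training loop is expected to call
    #     exporter.export(exe, program, eval_model_spec, eval_result, state)
    # after every evaluation pass; only improving results are saved.
    return exporter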
class BestInferenceModelExporter(Exporter):
"""export inference model accordingto `cmp_fn`"""
def __init__(self,
export_dir,
cmp_fn,
model_class_or_model_fn=None,
hparams=None,
dataset=None):
"""doc"""
self._export_dir = export_dir
self._best = None
self.cmp_fn = cmp_fn
self.model_class_or_model_fn = model_class_or_model_fn
self.hparams = hparams
self.dataset = dataset
def export(self, exe, program, eval_model_spec, eval_result, state):
"""doc"""
if self.model_class_or_model_fn is not None and self.hparams is not None \
and self.dataset is not None:
log.info('Building program by user defined model function')
if issubclass(self.model_class_or_model_fn, Model):
_model_fn = _build_model_fn(self.model_class_or_model_fn)
elif inspect.isfunction(self.model_class_or_model_fn):
_model_fn = self.model_class_or_model_fn
else:
raise ValueError('unknown model %s' %
self.model_class_or_model_fn)
# build net
infer_program = F.Program()
startup_prog = F.Program()
with F.program_guard(infer_program, startup_prog):
#share var with Train net
with F.unique_name.guard():
log.info('Building Infer Graph')
infer_fea = self.dataset.features()
# run_config is None
self.model_spec = _build_net(_model_fn, infer_fea,
RunMode.PREDICT, self.hparams,
None)
log.info('Done')
infer_program = infer_program.clone(for_test=True)
self.program = ProgramPair(
train_program=infer_program, startup_program=startup_prog)
else:
self.program = program
self.model_spec = eval_model_spec
log.debug('New evaluate result: %s \nold: %s' %
(repr(eval_result), repr(self._best)))
if self._best is None or self.cmp_fn(old=self._best, new=eval_result):
log.debug('[Best Exporter]: export to %s' % self._export_dir)
if self.model_spec.inference_spec is None:
                raise ValueError("model_fn didn't return InferenceSpec")
inf_spec_dict = self.model_spec.inference_spec
if not isinstance(inf_spec_dict, dict):
inf_spec_dict = {'inference': inf_spec_dict}
for inf_spec_name, inf_spec in six.iteritems(inf_spec_dict):
if not isinstance(inf_spec, InferenceSpec):
                    raise ValueError('unknown inference spec type: %s' %
inf_spec)
save_dir = os.path.join(self._export_dir, inf_spec_name)
log.debug('[Best Exporter]: save inference model: "%s" to %s' %
(inf_spec_name, save_dir))
feed_var = [i.name for i in inf_spec.inputs]
fetch_var = inf_spec.outputs
infer_program = self.program.train_program
startup_prog = F.Program()
F.io.save_inference_model(
save_dir,
feed_var,
fetch_var,
exe,
main_program=infer_program)
self._best = eval_result
else:
log.debug('[Best Exporter]: skip step %s' % state.gstep)
|
py | 1a492fee3a57bb72b064249c93c284f24f3492be | #!/usr/bin/env python
"""
ErrorServerDetection plugin performs CDN detection when attempts to access
the web server via its IP address fail and disclose information about
the CDN in place.
"""
from plugins.ErrorServerDetection.behaviors import detect
|
py | 1a4930276b6df579d519dd309adf3babf87cea83 | from __future__ import unicode_literals
from django.db import models
__all__ = ('CharModel', 'IntegerModel', 'TextModel', 'BooleanModel',
'DateModel', 'DateTimeModel', 'ForeignKeyModel', 'ManyToManyModel',
'FileModel', 'TestModel',)
class CharModel(models.Model):
field = models.CharField(max_length=10)
class IntegerModel(models.Model):
field = models.IntegerField()
class TextModel(models.Model):
field = models.TextField(max_length=100)
class BooleanModel(models.Model):
field = models.BooleanField(default=False)
class DateModel(models.Model):
field = models.DateField()
class DateTimeModel(models.Model):
field = models.DateTimeField()
class ForeignKeyModel(models.Model):
field = models.ForeignKey(CharModel, on_delete=models.CASCADE)
class ManyToManyModel(models.Model):
field = models.ManyToManyField(CharModel)
class FileModel(models.Model):
field = models.FileField(upload_to='.', max_length=256)
class TestModel(models.Model):
field1 = models.CharField(max_length=10, verbose_name="Field #1")
field2 = models.IntegerField(verbose_name="Field #2")
no_verbose = models.IntegerField()
def __str__(self):
return '%s %i' % (self.field1, self.field2)
__unicode__ = __str__
def get_double(self):
return self.field2 * 2
|
py | 1a49307c49dd387a76f678e2aed1f1decf53aa12 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, GoElite and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('Conference booking')
class TestConferencebooking(unittest.TestCase):
pass
|
py | 1a49319dc465ac6b855ddca18c769832ed332aa9 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Diamond(CMakePackage):
"""DIAMOND is a sequence aligner for protein and translated DNA searches,
designed for high performance analysis of big sequence data."""
homepage = "https://ab.inf.uni-tuebingen.de/software/diamond"
url = "https://github.com/bbuchfink/diamond/archive/v2.0.9.tar.gz"
version('2.0.14', sha256='3eaef2b957e4ba845eac27a2ca3249aae4259ff1fe0ff5a21b094481328fdc53')
version('2.0.11', sha256='41f3197aaafff9c42763fb7658b67f730ebc6dd3c0533c9c3d54bd3166e93f24')
version('2.0.9', sha256='3019f1adb6411c6669a3a17351d0338ae02f6b3cab3c8a3bac91cf334dcda620')
version('2.0.8', sha256='04eed7c83828f50c7d9a1d48fe7c50a4c753e008501dc639c6521cf8a756c43b')
version('2.0.4', sha256='94e8fe72bdc28b83fd0f2d90c439b58b63b38263aa1a3905582ef68f614ae95d')
version('0.9.25', sha256='65298f60cf9421dcc7669ce61642611cd9eeffc32f66fd39ebfa25dd64416808')
version('0.9.23', sha256='0da5cdd5e5b77550ec0eaba2c6c431801cdd10d31606ca12f952b57d3d31db92')
version('0.9.22', sha256='35e518cfa0ac2fbc57e422d380bdb5123c6335742dd7965b76c34c95f241b729')
version('0.9.21', sha256='3f10e089c24d24f3066f3a58fa01bf356c4044e0a0bcab081b9bf1a8d946c9b1')
version('0.9.20', sha256='5cf629baf135f54dc93728e3618ae08c64c1ecb81b3f2d2d48fcfd1c010ed8f0')
version('0.9.19', sha256='fab783f51af9010666f2b569f438fb38843d0201fe0c0e167db5b70d12459e30')
version('0.9.14', sha256='de870a7806ac0aa47b97c9b784dd7201e2c8e11a122003bde440d926211b911e')
version('0.8.38', sha256='582a7932f3aa73b0eac2275dd773818665f0b067b32a79ff5a13b0e3ca375f60')
version('0.8.26', sha256='00d2be32dad76511a767ab8e917962c0ecc572bc808080be60dec028df45439f')
depends_on('zlib')
conflicts('target=aarch64:', when='@:0.9.25')
# fix error [-Wc++11-narrowing]
# Ref: https://github.com/bbuchfink/diamond/commit/155e076d662b0e9268e2b00bef6d33d90aede7ff
patch('fix_narrowing_error.patch', when='@:0.9.25')
|
py | 1a4931bf3c01edadd732448e7b9b313674cb0761 | # Django settings for demo project.
import os
settings_path, settings_module = os.path.split(__file__)
import sys
sys.path.append('../../')
DEBUG = True
#TEMPLATE_DEBUG = DEBUG
USE_TZ=True
#TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SECRET_KEY = '8(o*lht586wqr9hp5env&n!h!gu@t5g4*$$uupbyd*f+61!xjh'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'mydatabase',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
# 'django.contrib.admin',
)
MIDDLEWARE_CLASSES = (
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(settings_path, 'templates')],
}
]
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_BACKEND = 'postmark.django_backend.EmailBackend'
#Supply your own API KEY
POSTMARK_API_KEY = ''
assert len(POSTMARK_API_KEY) != 0
#Use the sender set up in your postmark account
POSTMARK_SENDER = ''
assert len(POSTMARK_SENDER) != 0
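# A minimal sketch of sending mail through the backend configured above; the
# recipient address is a placeholder, and the call is wrapped in a function so
# nothing is sent at import time.
def _example_send():
    from django.core.mail import send_mail
    send_mail('Subject', 'Body text', POSTMARK_SENDER, ['[email protected]'])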
|
py | 1a49327c59ef887b434858537651f8cb7d71b646 | def test_dictionary():
"""Dictionary"""
fruits_dictionary = {
'cherry': 'red',
'apple': 'green',
'banana': 'yellow',
}
assert isinstance(fruits_dictionary, dict)
assert fruits_dictionary['apple'] == 'green'
assert fruits_dictionary['banana'] == 'yellow'
assert fruits_dictionary['cherry'] == 'red'
assert 'apple' in fruits_dictionary
assert 'pineapple' not in fruits_dictionary
# Modify
fruits_dictionary['apple'] = 'red'
# Add
fruits_dictionary['pineapple'] = 'yellow'
assert fruits_dictionary['pineapple'] == "yellow"
assert list(fruits_dictionary) == ['cherry', 'apple', 'banana', 'pineapple']
assert sorted(fruits_dictionary) == [
'apple', 'banana', 'cherry', 'pineapple'
]
del fruits_dictionary['pineapple']
assert list(fruits_dictionary) == ['cherry', 'apple', 'banana']
dictionary_via_constructor = dict([('sape', 4139), ('guido', 4127),
('jack', 4098)])
assert dictionary_via_constructor['sape'] == 4139
assert dictionary_via_constructor['guido'] == 4127
assert dictionary_via_constructor['jack'] == 4098
dictionary_via_expression = {x: x**2 for x in (2, 4, 6)}
assert dictionary_via_expression[2] == 4
assert dictionary_via_expression[4] == 16
assert dictionary_via_expression[6] == 36
dictionary_for_string_keys = dict(sape=4139, guido=4127, jack=4098)
assert dictionary_for_string_keys['sape'] == 4139
assert dictionary_for_string_keys['guido'] == 4127
assert dictionary_for_string_keys['jack'] == 4098
|
py | 1a49345758697fa12afe239335dea4e78ddbb06f | # users/views.py
from django.contrib.auth.models import User
from django.views.decorators.csrf import csrf_exempt
from rest_framework import generics, request, status
from rest_framework.decorators import api_view
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from house.services import CosineCalculator
import ipdb
from house.serializers import HouseSerializer
from house.serializers import RoomSerializer
from users.models import Profile
from . import models
from . import serializers
from .models import House, Room
@csrf_exempt
@api_view(['POST'])
def create_house(request):
token_header = request.META.get("HTTP_AUTHORIZATION")[6:]
token = Token.objects.get(key=token_header)
user = User.objects.get(id=token.user_id)
house = HouseSerializer(data=request.data)
if house.is_valid():
house.save(landlord=user)
return Response({'status':'ok'}, status=status.HTTP_201_CREATED)
else:
return Response(house._errors, status=status.HTTP_400_BAD_REQUEST)
# @api_view(['GET'])
# def get_rooms(request):
# token_header = request.META.get("HTTP_AUTHORIZATION")[6:]
# # import ipdb;
# # ipdb.set_trace()
# try:
# token = Token.objects.get(key=token_header)
# except Token.DoesNotExist:
# return Response({"status": "Nao autenticado", "load": 0}, status=status.HTTP_200_OK)
#
#
# rooms = models.Room.objects.all()
# serializers_room = RoomSerializer(rooms, many=True)
# return Response({"rooms": serializers_room.data}, status=status.HTTP_200_OK)
#
# @csrf_exempt
@api_view(['GET'])
def get_rooms(request):
token_header = request.META.get("HTTP_AUTHORIZATION")[6:]
token = Token.objects.get(key=token_header)
profile = Profile.objects.get(user_id=token.user_id)
cosine = CosineCalculator()
houses = House.objects.all()
future_tenant = Profile.objects.get(user_id=token.user_id)
houses = cosine.calculate_similarity_all_houses(houses, future_tenant, profile)
rooms = []
for house in houses:
rooms_buffer = Room.objects.filter(house=house, tenant__isnull=True).all()
for room_buffer in rooms_buffer:
room_serializer = serializers.RoomSerializer(room_buffer)
# ipdb.set_trace()
# if room_serializer.tenant is not None: continue
new_room = {"value": house.value}
new_room.update(room_serializer.data)
rooms.append(new_room)
return Response({"rooms": rooms}, status=status.HTTP_200_OK) |
py | 1a49347844f964da5445e3393863be76ea404def | class PropertyExpectedException(Exception):
pass
class InvalidStateException(Exception):
pass
|
py | 1a49348dc83b5c1210cec19b7dc893df198276dd | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.base.errors import ExchangeError
class paymium (Exchange):
def describe(self):
return self.deep_extend(super(paymium, self).describe(), {
'id': 'paymium',
'name': 'Paymium',
'countries': ['FR', 'EU'],
'rateLimit': 2000,
'version': 'v1',
'has': {
'CORS': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/27790564-a945a9d4-5ff9-11e7-9d2d-b635763f2f24.jpg',
'api': 'https://paymium.com/api',
'www': 'https://www.paymium.com',
'doc': [
'https://github.com/Paymium/api-documentation',
'https://www.paymium.com/page/developers',
],
},
'api': {
'public': {
'get': [
'countries',
'data/{id}/ticker',
'data/{id}/trades',
'data/{id}/depth',
'bitcoin_charts/{id}/trades',
'bitcoin_charts/{id}/depth',
],
},
'private': {
'get': [
'merchant/get_payment/{UUID}',
'user',
'user/addresses',
'user/addresses/{btc_address}',
'user/orders',
'user/orders/{UUID}',
'user/price_alerts',
],
'post': [
'user/orders',
'user/addresses',
'user/payment_requests',
'user/price_alerts',
'merchant/create_payment',
],
'delete': [
'user/orders/{UUID}/cancel',
'user/price_alerts/{id}',
],
},
},
'markets': {
'BTC/EUR': {'id': 'eur', 'symbol': 'BTC/EUR', 'base': 'BTC', 'quote': 'EUR', 'baseId': 'btc', 'quoteId': 'eur'},
},
'fees': {
'trading': {
'maker': 0.0059,
'taker': 0.0059,
},
},
})
async def fetch_balance(self, params={}):
await self.load_markets()
response = await self.privateGetUser(params)
result = {'info': response}
currencies = list(self.currencies.keys())
for i in range(0, len(currencies)):
code = currencies[i]
currencyId = self.currencyId(code)
free = 'balance_' + currencyId
if free in response:
account = self.account()
used = 'locked_' + currencyId
account['free'] = self.safe_float(response, free)
account['used'] = self.safe_float(response, used)
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
request = {
'id': self.market_id(symbol),
}
response = await self.publicGetDataIdDepth(self.extend(request, params))
return self.parse_order_book(response, None, 'bids', 'asks', 'price', 'amount')
async def fetch_ticker(self, symbol, params={}):
request = {
'id': self.market_id(symbol),
}
ticker = await self.publicGetDataIdTicker(self.extend(request, params))
timestamp = self.safe_timestamp(ticker, 'at')
vwap = self.safe_float(ticker, 'vwap')
baseVolume = self.safe_float(ticker, 'volume')
quoteVolume = None
if baseVolume is not None and vwap is not None:
quoteVolume = baseVolume * vwap
last = self.safe_float(ticker, 'price')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': self.safe_float(ticker, 'bid'),
'bidVolume': None,
'ask': self.safe_float(ticker, 'ask'),
'askVolume': None,
'vwap': vwap,
'open': self.safe_float(ticker, 'open'),
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': self.safe_float(ticker, 'variation'),
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def parse_trade(self, trade, market):
timestamp = self.safe_timestamp(trade, 'created_at_int')
id = self.safe_string(trade, 'uuid')
symbol = None
if market is not None:
symbol = market['symbol']
side = self.safe_string(trade, 'side')
price = self.safe_float(trade, 'price')
amountField = 'traded_' + market['base'].lower()
amount = self.safe_float(trade, amountField)
cost = None
if price is not None:
if amount is not None:
cost = amount * price
return {
'info': trade,
'id': id,
'order': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': None,
'side': side,
'takerOrMaker': None,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'id': market['id'],
}
response = await self.publicGetDataIdTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
request = {
'type': self.capitalize(type) + 'Order',
'currency': self.market_id(symbol),
'direction': side,
'amount': amount,
}
if type != 'market':
request['price'] = price
response = await self.privatePostUserOrders(self.extend(request, params))
return {
'info': response,
'id': response['uuid'],
}
async def cancel_order(self, id, symbol=None, params={}):
request = {
'UUID': id,
}
return await self.privateDeleteUserOrdersUUIDCancel(self.extend(request, params))
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'public':
if query:
url += '?' + self.urlencode(query)
else:
self.check_required_credentials()
nonce = str(self.nonce())
auth = nonce + url
if method == 'POST':
if query:
body = self.json(query)
auth += body
headers = {
'Api-Key': self.apiKey,
'Api-Signature': self.hmac(self.encode(auth), self.encode(self.secret)),
'Api-Nonce': nonce,
'Content-Type': 'application/json',
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'errors' in response:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
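# A standalone sketch of the private-endpoint signing performed in sign() above,
# assuming ccxt's default hmac() (HMAC-SHA256, hex digest); the key, secret and
# nonce values below are placeholders, not real credentials.
def _example_paymium_signature():
    import hashlib
    import hmac
    import time

    api_key = 'API_KEY'          # placeholder
    api_secret = b'API_SECRET'   # placeholder
    nonce = str(int(time.time() * 1000))
    url = 'https://paymium.com/api/v1/user'
    auth = nonce + url           # the JSON body is appended for POST requests
    signature = hmac.new(api_secret, auth.encode(), hashlib.sha256).hexdigest()
    return {
        'Api-Key': api_key,
        'Api-Signature': signature,
        'Api-Nonce': nonce,
        'Content-Type': 'application/json',
    }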
|
py | 1a493591e279277130586739fb9366c90cb46b2e | # coding: utf-8
#########################################################################
# Website: <a href="http://www.crazyit.org">疯狂Java联盟</a>              #
# author yeeku.H.lee [email protected] #
# #
# version 1.0 #
# #
# Copyright (C), 2001-2018, yeeku.H.Lee #
# #
# This program is protected by copyright laws. #
# #
# Program Name: #
# #
# <br>Date: #
#########################################################################
import os
path = 'my_dir'
# Rename the subdirectory under the current directory directly
os.rename(path, 'your_dir')
path = "abc/xyz/wawa"
# Recursively rename the nested directories
os.renames(path, 'foo/bar/haha') |
py | 1a4936a64a48de814bafb0d0506aede3ea1918b4 | import fire
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import os
import time
import datetime
from dcgan.discriminator import make_discriminator_model, discriminator_loss
from dcgan.generator import make_generator_model, generator_loss
from dcgan.dataset import make_dataset
from dcgan.utils import *
from dcgan.metrics import *
from dcgan import CHECKPOINT_DIR, MODEL_DIR
try:
from IPython import display
except:
pass
@tf.function
def train_step(
images,
epoch,
summary_writer,
generator,
discriminator,
generator_optimizer,
discriminator_optimizer,
):
noise = tf.random.normal([256, 100])
# tf.random.gau
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(noise, training=True)
real_output = discriminator(images, training=True)
fake_output = discriminator(generated_images, training=True)
gen_loss = generator_loss(fake_output)
# gen_loss = tf.vectorized_map(generator_loss, fake_output)
# disc_loss = tf.vectorized_map(discriminator_loss, fake_output)
disc_loss = discriminator_loss(real_output, fake_output)
gen_loss_metric.update_state(gen_loss)
disc_loss_metric.update_state(disc_loss)
fake_out.update_state(fake_output[0])
real_out.update_state(real_output[0])
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
record_metrics(epoch, summary_writer)
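# The loss functions imported above are implemented in dcgan.generator and
# dcgan.discriminator; the sketch below shows the conventional DCGAN
# cross-entropy formulation and is only an assumption about their behaviour,
# not a copy of the project's implementation.
def _reference_dcgan_losses(real_output, fake_output):
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    # discriminator: real samples should be scored as 1, generated samples as 0
    disc_loss = bce(tf.ones_like(real_output), real_output) + \
                bce(tf.zeros_like(fake_output), fake_output)
    # generator: it wants its samples to be scored as real
    gen_loss = bce(tf.ones_like(fake_output), fake_output)
    return gen_loss, disc_loss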
def train(epochs, logname, channels=1, batch_size=256, data_folder=None):
tf.profiler.experimental.server.start(6009)
generator = make_generator_model(32, channels)
discriminator = make_discriminator_model(32, channels)
generator_optimizer = tf.keras.optimizers.Adam(1e-04, beta_1=0.5)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-04, beta_1=0.5)
checkpoint = tf.train.Checkpoint(
step=tf.Variable(1),
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
generator=generator,
discriminator=discriminator,
)
manager = tf.train.CheckpointManager(checkpoint, CHECKPOINT_DIR, max_to_keep=3)
summary_writer = make_summary_writer(logname)
dataset = make_dataset(32, data_folder, channels)
show_dataset(dataset, 16, summary_writer)
checkpoint.restore(manager.latest_checkpoint)
if manager.latest_checkpoint:
print("Restored from {}".format(manager.latest_checkpoint))
current_step = int(checkpoint.step.numpy())
print(
f"Continuing from epoch {current_step} + {epochs} -> {epochs + current_step}"
)
epochs = range(current_step, epochs + current_step)
else:
epochs = range(epochs)
print("Initializing from scratch.")
for epoch in epochs:
seed = tf.random.normal([16, 100])
start = time.time()
fake_out.reset_states()
real_out.reset_states()
gen_loss_metric.reset_states()
disc_loss_metric.reset_states()
for step, img_batch in enumerate(dataset.take(256)):
train_step(img_batch, epoch, summary_writer, generator, discriminator, generator_optimizer, discriminator_optimizer)
display.clear_output(wait=True)
generate_and_save_images(generator, epoch + 1, seed, summary_writer)
checkpoint.step.assign_add(1)
if int(checkpoint.step) % 15 == 0:
save_path = manager.save()
print(
"Saved checkpoint for step {}: {}".format(
int(checkpoint.step), save_path
)
)
# Produce images for the GIF as we go
print("Time for epoch {} is {} sec".format(epoch + 1, time.time() - start))
# Generate after the final epoch
display.clear_output(wait=True)
generate_and_save_images(generator, epochs, seed, summary_writer)
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
generator.save(os.path.join(MODEL_DIR, f"gen_trained_{current_time}"))
discriminator.save(os.path.join(MODEL_DIR, f"disc_trained_{current_time}"))
def fire_():
fire.Fire(train)
|
py | 1a4936aab734f430c6de01905e80ce9304930a12 | # -*- coding: utf-8 -*-
import logging
import re
from urllib.parse import quote_plus
from requests.exceptions import RequestException
from flexget import plugin
from flexget.components.sites.utils import normalize_scene, torrent_availability
from flexget.config_schema import one_or_more
from flexget.entry import Entry
from flexget.event import event
from flexget.plugin import PluginError
from flexget.utils.requests import Session as RequestSession
from flexget.utils.soup import get_soup
from flexget.utils.tools import parse_filesize
log = logging.getLogger('fuzer')
requests = RequestSession()
CATEGORIES = {
# Movies
'HD Movies': 9,
'XviD': 7,
'BRRip': 59,
'Israeli HD Movies': 61,
'Israeli Movies': 60,
'DVDR': 58,
'Dubbed Movies': 83,
# TV
'HD Shows': 10,
'Shows': 8,
'Israeli HD Shows': 63,
'Israeli Shows': 62,
'Dubbed Shows': 84,
# Anime
'Anime': 65,
# FuzePacks
'Movie Packs': 73,
'Shows Packs': 76,
}
class UrlRewriteFuzer:
schema = {
'type': 'object',
'properties': {
'cookie_password': {'type': 'string'},
'user_id': {'type': 'integer'},
'rss_key': {'type': 'string'},
'category': one_or_more(
{'oneOf': [{'type': 'string', 'enum': list(CATEGORIES)}, {'type': 'integer'}]}
),
},
'required': ['user_id', 'cookie_password', 'rss_key'],
'additionalProperties': False,
}
def get_fuzer_soup(self, search_term, categories_list):
params = {'matchquery': 'any', 'ref_': 'advanced'}
query = '{}&{}'.format(search_term, '&'.join(categories_list))
try:
page = requests.get(
'https://www.fuzer.me/browse.php?query={}'.format(query),
params=params,
cookies=self.cookies,
)
except RequestException as e:
raise PluginError('Could not connect to Fuzer: {}'.format(e))
if 'login' in page.url:
raise PluginError('Could not fetch results from Fuzer. Check config')
log.debug('Using %s as fuzer search url', page.url)
return get_soup(page.content)
def extract_entry_from_soup(self, soup):
table = soup.find('div', {'id': 'main_table'})
if table is None:
raise PluginError('Could not fetch results table from Fuzer, aborting')
log.trace('fuzer results table: %s', table)
table = table.find('table', {'class': 'table_info'})
if len(table.find_all('tr')) == 1:
log.debug('No search results were returned from Fuzer, continuing')
return []
entries = []
for tr in table.find_all("tr"):
if not tr.get('class') or 'colhead_dark' in tr.get('class'):
continue
name = tr.find('div', {'class': 'main_title'}).find('a').text
torrent_name = re.search(
'\\n(.*)', tr.find('div', {'style': 'float: right;'}).find('a')['title']
).group(1)
attachment_link = tr.find('div', {'style': 'float: right;'}).find('a')['href']
attachment_id = re.search(r'attachmentid=(\d+)', attachment_link).group(1)
raw_size = tr.find_all('td', {'class': 'inline_info'})[0].text.strip()
seeders = int(tr.find_all('td', {'class': 'inline_info'})[2].text)
leechers = int(tr.find_all('td', {'class': 'inline_info'})[3].text)
e = Entry()
e['title'] = name
final_url = 'https://www.fuzer.me/rss/torrent.php/{}/{}/{}/{}'.format(
attachment_id, self.user_id, self.rss_key, torrent_name
)
log.debug('RSS-ified download link: %s', final_url)
e['url'] = final_url
e['torrent_seeds'] = seeders
e['torrent_leeches'] = leechers
e['torrent_availability'] = torrent_availability(
e['torrent_seeds'], e['torrent_leeches']
)
size = re.search(r'(\d+(?:[.,]\d+)*)\s?([KMGTP]B)', raw_size)
e['content_size'] = parse_filesize(size.group(0))
entries.append(e)
return entries
@plugin.internet(log)
def search(self, task, entry, config=None):
"""
Search for name from fuzer.
"""
self.rss_key = config['rss_key']
self.user_id = config['user_id']
self.cookies = {
'fzr2lastactivity': '0',
'fzr2lastvisit': '',
'fzr2password': config['cookie_password'],
'fzr2sessionhash': '',
'fzr2userid': str(self.user_id),
}
category = config.get('category', [0])
# Make sure categories is a list
if not isinstance(category, list):
category = [category]
# If there are any text categories, turn them into their id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in category]
c_list = ['c{}={}'.format(quote_plus('[]'), c) for c in categories]
entries = []
if entry.get('imdb_id'):
log.debug("imdb_id '%s' detected, using in search.", entry['imdb_id'])
soup = self.get_fuzer_soup(entry['imdb_id'], c_list)
entries = self.extract_entry_from_soup(soup)
if entries:
for e in list(entries):
e['imdb_id'] = entry.get('imdb_id')
else:
for search_string in entry.get('search_strings', [entry['title']]):
query = normalize_scene(search_string)
text = quote_plus(query.encode('windows-1255'))
soup = self.get_fuzer_soup(text, c_list)
entries += self.extract_entry_from_soup(soup)
return (
sorted(entries, reverse=True, key=lambda x: x.get('torrent_availability'))
if entries
else []
)
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteFuzer, 'fuzer', interfaces=['search'], api_ver=2)
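# A hedged configuration sketch for this plugin; all values are placeholders.
# In a FlexGet task the same keys appear under the 'fuzer' entry of the YAML
# configuration and must satisfy the schema defined on UrlRewriteFuzer.
_EXAMPLE_FUZER_CONFIG = {
    'user_id': 123456,                        # placeholder
    'cookie_password': 'fzr2password-value',  # placeholder
    'rss_key': 'rss-key-from-profile',        # placeholder
    'category': ['HD Movies', 'HD Shows'],
}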
|
py | 1a4936b5af64638c1cb60f13def88a6f9a9d4c8b | """Progress bars for SDGym compatible with logging and dask."""
import io
import logging
from datetime import datetime, timedelta
LOGGER = logging.getLogger(__name__)
class TqdmLogger(io.StringIO):
_buffer = ''
def write(self, buf):
self._buffer = buf.strip('\r\n\t ')
def flush(self):
LOGGER.info(self._buffer)
def progress(*futures):
"""Track progress of dask computation in a remote cluster.
LogProgressBar is defined inside here to avoid having to import
its dependencies if not used.
"""
# Import distributed only when used
from distributed.client import futures_of # pylint: disable=C0415
from distributed.diagnostics.progressbar import TextProgressBar # pylint: disable=c0415
class LogProgressBar(TextProgressBar):
"""Dask progress bar based on logging instead of stdout."""
last = 0
logger = logging.getLogger('distributed')
def _draw_bar(self, remaining, all, **kwargs): # pylint: disable=W0221,W0622
done = all - remaining
frac = (done / all) if all else 0
if frac > self.last + 0.01:
self.last = int(frac * 100) / 100
bar = "#" * int(self.width * frac)
percent = int(100 * frac)
time_per_task = self.elapsed / (all - remaining)
remaining_time = timedelta(seconds=time_per_task * remaining)
eta = datetime.utcnow() + remaining_time
elapsed = timedelta(seconds=self.elapsed)
msg = "[{0:<{1}}] | {2}/{3} ({4}%) Completed | {5} | {6} | {7}".format(
bar, self.width, done, all, percent, elapsed, remaining_time, eta
)
self.logger.info(msg)
LOGGER.info(msg)
def _draw_stop(self, **kwargs):
pass
futures = futures_of(futures)
if not isinstance(futures, (set, list)):
futures = [futures]
LogProgressBar(futures)
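# A minimal usage sketch, assuming a local dask cluster is acceptable; the
# squaring workload is a placeholder and not part of SDGym.
def _example_progress():
    from distributed import Client

    client = Client()                                 # spins up a local cluster
    futures = client.map(lambda x: x * x, range(10))  # placeholder workload
    progress(*futures)                                # logged via LogProgressBar
    return client.gather(futures)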
|
py | 1a49374a2415ffe7e20cd98ddc67e8ad94b010ac | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualNetworkGatewaysOperations:
"""VirtualNetworkGatewaysOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_05_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VirtualNetworkGateway",
**kwargs
) -> "models.VirtualNetworkGateway":
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VirtualNetworkGateway')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VirtualNetworkGateway",
**kwargs
) -> AsyncLROPoller["models.VirtualNetworkGateway"]:
"""Creates or updates a virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to create or update virtual network gateway operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def get(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> "models.VirtualNetworkGateway":
"""Gets the specified virtual network gateway by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualNetworkGateway, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGateway"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes the specified virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def _update_tags_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.TagsObject",
**kwargs
) -> Optional["models.VirtualNetworkGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.VirtualNetworkGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_tags_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_tags_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
async def begin_update_tags(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.TagsObject",
**kwargs
) -> AsyncLROPoller["models.VirtualNetworkGateway"]:
"""Updates a virtual network gateway tags.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to update virtual network gateway tags.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_tags_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}'} # type: ignore
def list(
self,
resource_group_name: str,
**kwargs
) -> AsyncIterable["models.VirtualNetworkGatewayListResult"]:
"""Gets all virtual network gateways by resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGatewayListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGatewayListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways'} # type: ignore
def list_connections(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncIterable["models.VirtualNetworkGatewayListConnectionsResult"]:
"""Gets all the connections in a virtual network gateway.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either VirtualNetworkGatewayListConnectionsResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGatewayListConnectionsResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGatewayListConnectionsResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_connections.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGatewayListConnectionsResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/connections'} # type: ignore
async def _reset_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
gateway_vip: Optional[str] = None,
**kwargs
) -> Optional["models.VirtualNetworkGateway"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.VirtualNetworkGateway"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._reset_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if gateway_vip is not None:
query_parameters['gatewayVip'] = self._serialize.query("gateway_vip", gateway_vip, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_reset_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'} # type: ignore
async def begin_reset(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
gateway_vip: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["models.VirtualNetworkGateway"]:
"""Resets the primary of the virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
        :param gateway_vip: The virtual network gateway VIP address supplied to the begin reset of
         the active-active feature enabled gateway.
:type gateway_vip: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VirtualNetworkGateway or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VirtualNetworkGateway]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VirtualNetworkGateway"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
gateway_vip=gateway_vip,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VirtualNetworkGateway', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/reset'} # type: ignore
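    # Usage sketch with the same hypothetical `client` as above: begin_reset returns an
    # AsyncLROPoller whose result is the reset VirtualNetworkGateway.
    #
    #     poller = await client.virtual_network_gateways.begin_reset(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     gateway = await poller.result()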
async def _reset_vpn_client_shared_key_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._reset_vpn_client_shared_key_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reset_vpn_client_shared_key_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'} # type: ignore
async def begin_reset_vpn_client_shared_key(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Resets the VPN client shared key of the virtual network gateway in the specified resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._reset_vpn_client_shared_key_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_reset_vpn_client_shared_key.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/resetvpnclientsharedkey'} # type: ignore
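    # Usage sketch with the hypothetical `client`: the poller resolves to None once the VPN client
    # shared key has been reset.
    #
    #     poller = await client.virtual_network_gateways.begin_reset_vpn_client_shared_key(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     await poller.result()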
async def _generatevpnclientpackage_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnClientParameters",
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generatevpnclientpackage_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generatevpnclientpackage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'} # type: ignore
async def begin_generatevpnclientpackage(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnClientParameters",
**kwargs
) -> AsyncLROPoller[str]:
"""Generates VPN client package for P2S client of the virtual network gateway in the specified
resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network gateway VPN client
package operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnClientParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._generatevpnclientpackage_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generatevpnclientpackage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnclientpackage'} # type: ignore
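    # Usage sketch with the hypothetical `client`; the VpnClientParameters field shown is an
    # assumption about the 2020-05-01 model and may need adjusting for a real gateway.
    #
    #     params = models.VpnClientParameters(processor_architecture="Amd64")
    #     poller = await client.virtual_network_gateways.begin_generatevpnclientpackage(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #         parameters=params,
    #     )
    #     package_url = await poller.result()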
async def _generate_vpn_profile_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnClientParameters",
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._generate_vpn_profile_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnClientParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_generate_vpn_profile_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'} # type: ignore
async def begin_generate_vpn_profile(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnClientParameters",
**kwargs
) -> AsyncLROPoller[str]:
"""Generates VPN profile for P2S client of the virtual network gateway in the specified resource
group. Used for IKEV2 and radius based authentication.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Parameters supplied to the generate virtual network gateway VPN client
package operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnClientParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._generate_vpn_profile_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_generate_vpn_profile.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/generatevpnprofile'} # type: ignore
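    # Usage sketch with the hypothetical `client`; authentication_method="EAPTLS" is an assumed
    # VpnClientParameters value and only illustrates the call shape.
    #
    #     params = models.VpnClientParameters(authentication_method="EAPTLS")
    #     poller = await client.virtual_network_gateways.begin_generate_vpn_profile(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #         parameters=params,
    #     )
    #     profile_url = await poller.result()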
async def _get_vpn_profile_package_url_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpn_profile_package_url_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpn_profile_package_url_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'} # type: ignore
async def begin_get_vpn_profile_package_url(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller[str]:
"""Gets pre-generated VPN profile for P2S client of the virtual network gateway in the specified
resource group. The profile needs to be generated first using generateVpnProfile.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_vpn_profile_package_url_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpn_profile_package_url.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnprofilepackageurl'} # type: ignore
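    # Usage sketch with the hypothetical `client`: the poller resolves to the URL (str) of the
    # previously generated VPN profile package.
    #
    #     poller = await client.virtual_network_gateways.begin_get_vpn_profile_package_url(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     package_url = await poller.result()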
async def _get_bgp_peer_status_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
peer: Optional[str] = None,
**kwargs
) -> Optional["models.BgpPeerStatusListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.BgpPeerStatusListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_bgp_peer_status_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
if peer is not None:
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_bgp_peer_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'} # type: ignore
async def begin_get_bgp_peer_status(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
peer: Optional[str] = None,
**kwargs
) -> AsyncLROPoller["models.BgpPeerStatusListResult"]:
"""The GetBgpPeerStatus operation retrieves the status of all BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer to retrieve the status of.
:type peer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either BgpPeerStatusListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.BgpPeerStatusListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.BgpPeerStatusListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_bgp_peer_status_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('BgpPeerStatusListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_bgp_peer_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getBgpPeerStatus'} # type: ignore
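    # Usage sketch with the hypothetical `client`: omitting `peer` returns the status of all BGP
    # peers as a BgpPeerStatusListResult; the attribute names iterated below are assumptions about
    # the BgpPeerStatus model.
    #
    #     poller = await client.virtual_network_gateways.begin_get_bgp_peer_status(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     status = await poller.result()
    #     for peer_status in status.value or []:
    #         print(peer_status.neighbor, peer_status.state)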
async def supported_vpn_devices(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> str:
"""Gets a xml format representation for supported vpn devices.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self.supported_vpn_devices.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
supported_vpn_devices.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/supportedvpndevices'} # type: ignore
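    # Usage sketch with the hypothetical `client`: this call is not a long-running operation and
    # returns the XML string directly.
    #
    #     devices_xml = await client.virtual_network_gateways.supported_vpn_devices(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )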
async def _get_learned_routes_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> Optional["models.GatewayRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GatewayRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_learned_routes_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_learned_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'} # type: ignore
async def begin_get_learned_routes(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller["models.GatewayRouteListResult"]:
"""This operation retrieves a list of routes the virtual network gateway has learned, including
routes learned from BGP peers.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GatewayRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.GatewayRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GatewayRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_learned_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_learned_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getLearnedRoutes'} # type: ignore
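    # Usage sketch with the hypothetical `client`: the poller resolves to a GatewayRouteListResult
    # whose `value` holds the learned routes; the route attributes printed below are assumptions
    # about the GatewayRoute model.
    #
    #     poller = await client.virtual_network_gateways.begin_get_learned_routes(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     routes = await poller.result()
    #     for route in routes.value or []:
    #         print(route.network, route.next_hop)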
async def _get_advertised_routes_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
peer: str,
**kwargs
) -> Optional["models.GatewayRouteListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.GatewayRouteListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_advertised_routes_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['peer'] = self._serialize.query("peer", peer, 'str')
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_advertised_routes_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'} # type: ignore
async def begin_get_advertised_routes(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
peer: str,
**kwargs
) -> AsyncLROPoller["models.GatewayRouteListResult"]:
"""This operation retrieves a list of routes the virtual network gateway is advertising to the
specified peer.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param peer: The IP address of the peer.
:type peer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either GatewayRouteListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.GatewayRouteListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.GatewayRouteListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_advertised_routes_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
peer=peer,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('GatewayRouteListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_advertised_routes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getAdvertisedRoutes'} # type: ignore
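    # Usage sketch with the hypothetical `client`: `peer` is the IP address of the BGP peer whose
    # advertised routes should be listed.
    #
    #     poller = await client.virtual_network_gateways.begin_get_advertised_routes(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #         peer="10.0.0.4",
    #     )
    #     advertised = await poller.result()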
async def _set_vpnclient_ipsec_parameters_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
vpnclient_ipsec_params: "models.VpnClientIPsecParameters",
**kwargs
) -> Optional["models.VpnClientIPsecParameters"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.VpnClientIPsecParameters"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._set_vpnclient_ipsec_parameters_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(vpnclient_ipsec_params, 'VpnClientIPsecParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_set_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'} # type: ignore
async def begin_set_vpnclient_ipsec_parameters(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
vpnclient_ipsec_params: "models.VpnClientIPsecParameters",
**kwargs
) -> AsyncLROPoller["models.VpnClientIPsecParameters"]:
"""The Set VpnclientIpsecParameters operation sets the vpnclient ipsec policy for P2S client of
virtual network gateway in the specified resource group through Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param vpnclient_ipsec_params: Parameters supplied to the Begin Set vpnclient ipsec parameters
of Virtual Network Gateway P2S client operation through Network resource provider.
:type vpnclient_ipsec_params: ~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnClientIPsecParameters"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._set_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
vpnclient_ipsec_params=vpnclient_ipsec_params,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_set_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/setvpnclientipsecparameters'} # type: ignore
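    # Usage sketch with the hypothetical `client`; the VpnClientIPsecParameters fields shown are
    # assumptions about the 2020-05-01 model and only illustrate the call shape.
    #
    #     ipsec_params = models.VpnClientIPsecParameters(
    #         sa_life_time_seconds=86400,
    #         sa_data_size_kilobytes=429497,
    #         ipsec_encryption="AES256",
    #         ipsec_integrity="SHA256",
    #         ike_encryption="AES256",
    #         ike_integrity="SHA384",
    #         dh_group="DHGroup24",
    #         pfs_group="PFS24",
    #     )
    #     poller = await client.virtual_network_gateways.begin_set_vpnclient_ipsec_parameters(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #         vpnclient_ipsec_params=ipsec_params,
    #     )
    #     applied_params = await poller.result()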
async def _get_vpnclient_ipsec_parameters_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> "models.VpnClientIPsecParameters":
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnClientIPsecParameters"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpnclient_ipsec_parameters_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpnclient_ipsec_parameters_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'} # type: ignore
async def begin_get_vpnclient_ipsec_parameters(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller["models.VpnClientIPsecParameters"]:
"""The Get VpnclientIpsecParameters operation retrieves information about the vpnclient ipsec
policy for P2S client of virtual network gateway in the specified resource group through
Network resource provider.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The virtual network gateway name.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnClientIPsecParameters or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientIPsecParameters]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnClientIPsecParameters"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_vpnclient_ipsec_parameters_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientIPsecParameters', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_ipsec_parameters.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getvpnclientipsecparameters'} # type: ignore
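    # Usage sketch with the hypothetical `client`: the poller resolves to the currently configured
    # VpnClientIPsecParameters.
    #
    #     poller = await client.virtual_network_gateways.begin_get_vpnclient_ipsec_parameters(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_name="example-gw",
    #     )
    #     current_params = await poller.result()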
async def vpn_device_configuration_script(
self,
resource_group_name: str,
virtual_network_gateway_connection_name: str,
parameters: "models.VpnDeviceScriptParameters",
**kwargs
) -> str:
"""Gets a xml format representation for vpn device configuration script.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_connection_name: The name of the virtual network gateway
connection for which the configuration script is generated.
:type virtual_network_gateway_connection_name: str
:param parameters: Parameters supplied to the generate vpn device script operation.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnDeviceScriptParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:return: str, or the result of cls(response)
:rtype: str
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[str]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.vpn_device_configuration_script.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayConnectionName': self._serialize.url("virtual_network_gateway_connection_name", virtual_network_gateway_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnDeviceScriptParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
vpn_device_configuration_script.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/connections/{virtualNetworkGatewayConnectionName}/vpndeviceconfigurationscript'} # type: ignore
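    # Usage sketch with the hypothetical `client`; the VpnDeviceScriptParameters fields and values
    # shown are assumptions about the 2020-05-01 model.
    #
    #     script = await client.virtual_network_gateways.vpn_device_configuration_script(
    #         resource_group_name="example-rg",
    #         virtual_network_gateway_connection_name="example-connection",
    #         parameters=models.VpnDeviceScriptParameters(
    #             vendor="Cisco", device_family="ISR", firmware_version="15.x"
    #         ),
    #     )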
async def _start_packet_capture_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: Optional["models.VpnPacketCaptureStartParameters"] = None,
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._start_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
if parameters is not None:
body_content = self._serialize.body(parameters, 'VpnPacketCaptureStartParameters')
else:
body_content = None
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_start_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
async def begin_start_packet_capture(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: Optional["models.VpnPacketCaptureStartParameters"] = None,
**kwargs
) -> AsyncLROPoller[str]:
"""Starts packet capture on virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Virtual network gateway packet capture parameters supplied to start packet
capture on gateway.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnPacketCaptureStartParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_packet_capture_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/startPacketCapture'} # type: ignore
async def _stop_packet_capture_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnPacketCaptureStopParameters",
**kwargs
) -> Optional[str]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional[str]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._stop_packet_capture_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'VpnPacketCaptureStopParameters')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_stop_packet_capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
async def begin_stop_packet_capture(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
parameters: "models.VpnPacketCaptureStopParameters",
**kwargs
) -> AsyncLROPoller[str]:
"""Stops packet capture on virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:param parameters: Virtual network gateway packet capture parameters supplied to stop packet
capture on gateway.
:type parameters: ~azure.mgmt.network.v2020_05_01.models.VpnPacketCaptureStopParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either str or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[str]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[str]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_packet_capture_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('str', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop_packet_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/stopPacketCapture'} # type: ignore
async def _get_vpnclient_connection_health_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> Optional["models.VpnClientConnectionHealthDetailListResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.VpnClientConnectionHealthDetailListResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
accept = "application/json"
# Construct URL
url = self._get_vpnclient_connection_health_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_get_vpnclient_connection_health_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
async def begin_get_vpnclient_connection_health(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
**kwargs
) -> AsyncLROPoller["models.VpnClientConnectionHealthDetailListResult"]:
"""Get VPN client connection health detail per P2S client connection of the virtual network
gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either VpnClientConnectionHealthDetailListResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_05_01.models.VpnClientConnectionHealthDetailListResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.VpnClientConnectionHealthDetailListResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._get_vpnclient_connection_health_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('VpnClientConnectionHealthDetailListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_get_vpnclient_connection_health.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/getVpnClientConnectionHealth'} # type: ignore
async def _disconnect_virtual_network_gateway_vpn_connections_initial(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
request: "models.P2SVpnConnectionRequest",
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-05-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._disconnect_virtual_network_gateway_vpn_connections_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'virtualNetworkGatewayName': self._serialize.url("virtual_network_gateway_name", virtual_network_gateway_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(request, 'P2SVpnConnectionRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_disconnect_virtual_network_gateway_vpn_connections_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
async def begin_disconnect_virtual_network_gateway_vpn_connections(
self,
resource_group_name: str,
virtual_network_gateway_name: str,
request: "models.P2SVpnConnectionRequest",
**kwargs
) -> AsyncLROPoller[None]:
"""Disconnect vpn connections of virtual network gateway in the specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param virtual_network_gateway_name: The name of the virtual network gateway.
:type virtual_network_gateway_name: str
        :param request: The parameters supplied to disconnect vpn connections.
:type request: ~azure.mgmt.network.v2020_05_01.models.P2SVpnConnectionRequest
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._disconnect_virtual_network_gateway_vpn_connections_initial(
resource_group_name=resource_group_name,
virtual_network_gateway_name=virtual_network_gateway_name,
request=request,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_disconnect_virtual_network_gateway_vpn_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/virtualNetworkGateways/{virtualNetworkGatewayName}/disconnectVirtualNetworkGatewayVpnConnections'} # type: ignore
|
py | 1a493761fc46af2782d9c2900c252ca2b0f074d9 | import pickle
import itertools
import os
import math
from sklearn.preprocessing import normalize
import re
from operator import add
import matplotlib.pyplot as plt
#%matplotlib inline
import numpy as np
import argparse
import pylab as pl
import random
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def normalize_by_row(arr):
row_sums = np.sqrt((arr*arr).sum(axis=1))
new_arr = arr / row_sums[:, np.newaxis]
return new_arr
def grep(pat, txt, ind):
    # return the ind-th capture group of pat matched in txt, as an int
    r = re.search(pat, txt)
    return int(r.group(ind))
def compute_embds_matrix(path, M):
pkls = []
for root, dirs, files in os.walk(path):
if len(files) != 0:
pkls.extend([os.path.join(root, file) for file in files if file.endswith('.pkl')])
pkls.sort(key=lambda txt: grep(r"(\d+)_(\d+)\.pkl", txt, 1))
#print(pkls)
A_lst = []
for pkl in pkls:
print(pkl)
with open(pkl, 'rb') as handle:
samples = pickle.load(handle)
#keys = list(samples.keys())
#keys.sort(key=lambda txt: grep(r"(\d+)\.png", txt, 1))
#samples = [samples[key] for key in keys]
chunks = [normalize(np.asarray(samples[i:i+M]), axis=1, norm='l2') for i in range(0, len(samples), M)]
#print(chunks[0].shape)
#print(len(chunks))
A_lst.extend(chunks)
return A_lst
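# Note (added): compute_embds_matrix is expected to walk a directory tree of
# "<start>_<end>.pkl" embedding pickles and return a list of row-normalized
# numpy blocks, each of shape (M, embedding_dim). monte_carlo below consumes
# that list together with a single unit-norm anchor row vector.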
def monte_carlo(A_lst, I0, N, d):
Count = 0
for i in range(N):
#print('i={}'.format(i))
Ai = A_lst[i]
#print(I0)
AiT = np.transpose(Ai)
#print(np.matmul(I0, AiT))
theta_mat = np.arccos(np.matmul(I0, AiT)) / math.pi
theta_mat = theta_mat - np.ones(theta_mat.shape)*d
Count += np.sum(theta_mat <= 0)
#Pr += np.sum(np.exp(1-np.arccos(np.matmul(I0, AiT)) / math.pi))
return Count
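# Minimal sketch of what monte_carlo counts (added illustration, made-up data):
#
#     I0 = np.array([[1.0, 0.0]])                     # unit-norm anchor, shape (1, 2)
#     A  = normalize(np.array([[1.0, 0.0],
#                              [0.0, 1.0]]), axis=1)  # two unit rows
#     # arccos(I0 @ A.T) / pi -> [[0.0, 0.5]]; with d = 0.25 only the first
#     # row falls within the angular-distance threshold, so the count is 1.
#     assert monte_carlo([A], I0, N=1, d=0.25) == 1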
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Sampling nearest neighbors')
parser.add_argument('--start', required=True, help='Start of the distance hard threshold', type=float)
parser.add_argument('--end', required=True, help='End of the distance hard threshold', type=float)
parser.add_argument('--step_size', required=True, help='Step size of the distance hard threshold', type=float)
parser.add_argument('--job_id', required=True, help='The id of the submitted job', type=str)
parser.add_argument('--sampling_path', required=True, help='The path of the saved embeddings', type=str)
parser.add_argument('--M', default=10000, help='The dimension of the tiled matrix', type=int)
parser.add_argument('--N', default=100, help='The number of tiled matrix', type=int)
parser.add_argument('--K', default=100, help='The number of anchor points', type=int)
parser.add_argument('--random_anchor', required=True, help='Whether we should get the anchor points by randomly sampling', type=str2bool)
args, other_args = parser.parse_known_args()
M = args.M
N = args.N
path = os.path.join(args.sampling_path, 'embds')
anchor_pt_dct = {}
if args.random_anchor:
indices = random.sample(range(M * N), args.K)
else:
with open(os.path.join(args.sampling_path, 'neighbors', 'clustered_indices.pkl'), 'rb') as handle:
indices = pickle.load(handle)
print('Loading indices from saved pickle file')
print(indices)
for i in indices:
pkl_dir = os.path.join(path, '{}_{}'.format((i // 1000000)*1000000, (i // 1000000 + 1)*1000000),
'{}_{}.pkl'.format((i // 10000)*10000, (i // 10000 + 1)*10000))
with open(pkl_dir, 'rb') as handle:
pkl = pickle.load(handle)
vec = pkl[i % 10000]
anchor_pt_dct[i] = vec / np.linalg.norm(vec)
ripley_dir = os.path.join(args.sampling_path, 'ripley')
if not os.path.exists(ripley_dir):
os.makedirs(ripley_dir)
A_lst = compute_embds_matrix(path, M)
file = open(os.path.join(ripley_dir, 'ripley_{}.txt'.format(args.job_id)), 'w')
for d in list(pl.frange(args.start,args.end,args.step_size)):
for k,v in anchor_pt_dct.items():
print(d)
v = v / np.linalg.norm(v)
v = v[np.newaxis,:]
count = monte_carlo(A_lst, v, N, d)
#Pr = (monte_carlo(A_lst, v, N)-10000000)/((np.e-1)*10000000)
result = '{}:\t{}:{}'.format(k, d, count)
print(result)
file.write(result+'\n')
file.close()
|
py | 1a49383eb169544c7eaed72080d81f4b9631bb6a | from typing import Dict
import pytest
import great_expectations.exceptions.exceptions as ge_exceptions
from great_expectations.data_context import DataContext
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.rule_based_profiler.helpers.util import (
get_parameter_value_and_validate_return_type,
)
from great_expectations.rule_based_profiler.parameter_builder import (
SimpleDateFormatStringParameterBuilder,
)
from great_expectations.rule_based_profiler.parameter_builder.simple_date_format_string_parameter_builder import (
DEFAULT_CANDIDATE_STRINGS,
)
from great_expectations.rule_based_profiler.types import Domain, ParameterContainer
def test_simple_date_format_parameter_builder_instantiation(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
date_format_string_parameter: SimpleDateFormatStringParameterBuilder = (
SimpleDateFormatStringParameterBuilder(
name="my_simple_date_format_string_parameter_builder",
data_context=data_context,
)
)
assert date_format_string_parameter.threshold == 1.0
assert date_format_string_parameter.candidate_strings == DEFAULT_CANDIDATE_STRINGS
def test_simple_date_format_parameter_builder_zero_batch_id_error(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
date_format_string_parameter: SimpleDateFormatStringParameterBuilder = (
SimpleDateFormatStringParameterBuilder(
name="my_simple_date_format_string_parameter_builder",
data_context=data_context,
)
)
domain: Domain = Domain(domain_type=MetricDomainTypes.COLUMN)
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
with pytest.raises(ge_exceptions.ProfilerExecutionError) as e:
date_format_string_parameter.build_parameters(
domain=domain,
parameters=parameters,
)
assert (
str(e.value)
== "Utilizing a SimpleDateFormatStringParameterBuilder requires a non-empty list of batch identifiers."
)
def test_simple_date_format_parameter_builder_alice(
alice_columnar_table_single_batch_context,
):
data_context: DataContext = alice_columnar_table_single_batch_context
batch_request: dict = {
"datasource_name": "alice_columnar_table_single_batch_datasource",
"data_connector_name": "alice_columnar_table_single_batch_data_connector",
"data_asset_name": "alice_columnar_table_single_batch_data_asset",
}
metric_domain_kwargs = {"column": "event_ts"}
date_format_string_parameter: SimpleDateFormatStringParameterBuilder = (
SimpleDateFormatStringParameterBuilder(
name="my_date_format",
metric_domain_kwargs=metric_domain_kwargs,
batch_request=batch_request,
data_context=data_context,
)
)
assert date_format_string_parameter.candidate_strings == DEFAULT_CANDIDATE_STRINGS
assert date_format_string_parameter._threshold == 1.0
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
assert parameter_container.parameter_nodes is None
date_format_string_parameter.build_parameters(
domain=domain,
parameters=parameters,
)
# noinspection PyTypeChecker
assert len(parameter_container.parameter_nodes) == 1
fully_qualified_parameter_name_for_value: str = "$parameter.my_date_format"
expected_value: dict = {
"value": "%Y-%m-%d %H:%M:%S",
"details": {
"success_ratio": 1.0,
"candidate_strings": {
"%Y-%m-%d %H:%M:%S": 1.0,
"%y/%m/%d %H:%M:%S": 0.0,
"%y/%m/%d": 0.0,
"%y-%m-%d %H:%M:%S,%f %z": 0.0,
"%y-%m-%d %H:%M:%S,%f": 0.0,
"%y-%m-%d %H:%M:%S": 0.0,
"%y-%m-%d": 0.0,
"%y%m%d %H:%M:%S": 0.0,
"%m/%d/%y*%H:%M:%S": 0.0,
"%m/%d/%y %H:%M:%S %z": 0.0,
"%m/%d/%Y*%H:%M:%S*%f": 0.0,
"%m/%d/%Y*%H:%M:%S": 0.0,
"%m/%d/%Y %H:%M:%S %z": 0.0,
"%m/%d/%Y %H:%M:%S %p:%f": 0.0,
"%m/%d/%Y %H:%M:%S %p": 0.0,
"%m/%d/%Y": 0.0,
"%m-%d-%Y": 0.0,
"%m%d_%H:%M:%S.%f": 0.0,
"%m%d_%H:%M:%S": 0.0,
"%d/%m/%Y": 0.0,
"%d/%b/%Y:%H:%M:%S %z": 0.0,
"%d/%b/%Y:%H:%M:%S": 0.0,
"%d/%b/%Y %H:%M:%S": 0.0,
"%d/%b %H:%M:%S,%f": 0.0,
"%d-%m-%Y": 0.0,
"%d-%b-%Y %H:%M:%S.%f": 0.0,
"%d-%b-%Y %H:%M:%S": 0.0,
"%d %b %Y %H:%M:%S*%f": 0.0,
"%d %b %Y %H:%M:%S": 0.0,
"%b %d, %Y %H:%M:%S %p": 0.0,
"%b %d %Y %H:%M:%S": 0.0,
"%b %d %H:%M:%S %z %Y": 0.0,
"%b %d %H:%M:%S %z": 0.0,
"%b %d %H:%M:%S %Y": 0.0,
"%b %d %H:%M:%S": 0.0,
"%Y/%m/%d*%H:%M:%S": 0.0,
"%Y/%m/%d": 0.0,
"%Y-%m-%dT%z": 0.0,
"%Y-%m-%d*%H:%M:%S:%f": 0.0,
"%Y-%m-%d*%H:%M:%S": 0.0,
"%Y-%m-%d'T'%H:%M:%S.%f'%z'": 0.0,
"%Y-%m-%d'T'%H:%M:%S.%f": 0.0,
"%Y-%m-%d'T'%H:%M:%S'%z'": 0.0,
"%Y-%m-%d'T'%H:%M:%S%z": 0.0,
"%Y-%m-%d'T'%H:%M:%S": 0.0,
"%Y-%m-%d %H:%M:%S.%f%z": 0.0,
"%Y-%m-%d %H:%M:%S.%f": 0.0,
"%Y-%m-%d %H:%M:%S,%f%z": 0.0,
"%Y-%m-%d %H:%M:%S,%f": 0.0,
"%Y-%m-%d %H:%M:%S%z": 0.0,
"%Y-%m-%d %H:%M:%S %z": 0.0,
"%Y-%m-%d": 0.0,
"%Y%m%d %H:%M:%S.%f": 0.0,
"%Y %b %d %H:%M:%S.%f*%Z": 0.0,
"%Y %b %d %H:%M:%S.%f %Z": 0.0,
"%Y %b %d %H:%M:%S.%f": 0.0,
"%H:%M:%S.%f": 0.0,
"%H:%M:%S,%f": 0.0,
"%H:%M:%S": 0.0,
},
},
}
actual_value: dict = get_parameter_value_and_validate_return_type(
parameter_reference=fully_qualified_parameter_name_for_value,
expected_return_type=dict,
domain=domain,
parameters=parameters,
)
assert actual_value == expected_value
def test_simple_date_format_parameter_builder_bobby(
bobby_columnar_table_multi_batch_deterministic_data_context,
):
data_context: DataContext = (
bobby_columnar_table_multi_batch_deterministic_data_context
)
metric_domain_kwargs: dict = {"column": "pickup_datetime"}
candidate_strings: list[str] = [
"%Y-%m-%d",
"%Y-%m-%d %H:%M:%S",
]
threshold: float = 0.9
batch_request: dict = {
"datasource_name": "taxi_pandas",
"data_connector_name": "monthly",
"data_asset_name": "my_reports",
}
date_format_string_parameter: SimpleDateFormatStringParameterBuilder = (
SimpleDateFormatStringParameterBuilder(
name="my_simple_date_format_string_parameter_builder",
metric_domain_kwargs=metric_domain_kwargs,
candidate_strings=candidate_strings,
threshold=threshold,
batch_request=batch_request,
data_context=data_context,
)
)
assert date_format_string_parameter._candidate_strings == set(candidate_strings)
assert date_format_string_parameter._threshold == 0.9
domain: Domain = Domain(
domain_type=MetricDomainTypes.COLUMN, domain_kwargs=metric_domain_kwargs
)
parameter_container: ParameterContainer = ParameterContainer(parameter_nodes=None)
parameters: Dict[str, ParameterContainer] = {
domain.id: parameter_container,
}
assert parameter_container.parameter_nodes is None
date_format_string_parameter.build_parameters(
domain=domain,
parameters=parameters,
)
assert (
parameter_container.parameter_nodes is None
or len(parameter_container.parameter_nodes) == 1
)
fully_qualified_parameter_name_for_value: str = (
"$parameter.my_simple_date_format_string_parameter_builder.value"
)
expected_value: str = "%Y-%m-%d %H:%M:%S"
actual_value: str = get_parameter_value_and_validate_return_type(
parameter_reference=fully_qualified_parameter_name_for_value,
expected_return_type=str,
domain=domain,
parameters=parameters,
)
assert actual_value == expected_value
fully_qualified_parameter_name_for_meta: str = (
"$parameter.my_simple_date_format_string_parameter_builder.details"
)
expected_meta: dict = {
"success_ratio": 1.0,
"candidate_strings": {"%Y-%m-%d": 0.0, "%Y-%m-%d %H:%M:%S": 1.0},
}
meta: dict = get_parameter_value_and_validate_return_type(
parameter_reference=fully_qualified_parameter_name_for_meta,
expected_return_type=dict,
domain=domain,
parameters=parameters,
)
assert meta == expected_meta
|
py | 1a49384db1912b95569dbec1fa38f95acf55ceaf | #
# This is Seisflows
#
# See LICENCE file
#
###############################################################################
# Import system modules
import system
import subprocess
from glob import glob
from os.path import join
import sys
# Import Numpy
import numpy as np
# Local imports
import seisflows.plugins.solver.specfem3d_globe as solvertools
from seisflows.tools.seismic import getpar, setpar, Minmax
# from seisflows.plugins.io import loadbypar, copybin, loadbin, savebin
from seisflows.tools import unix
from seisflows.tools.seismic import call_solver
from seisflows.tools.tools import Struct, exists
from seisflows.config import ParameterError, custom_import
try:
PAR = sys.modules['seisflows_parameters']
PATH = sys.modules['seisflows_paths']
except:
print("Check parameters and paths.")
class specfem3d_globe(custom_import('solver', 'base')):
""" Python interface for SPECFEM3D_GLOBE
See base class for method descriptions
"""
try:
if PAR.MATERIALS in ['Isotropic']:
parameters = []
parameters += ['vp']
parameters += ['vs']
else:
parameters = []
parameters += ['vpv']
parameters += ['vph']
parameters += ['vsv']
parameters += ['vsh']
parameters += ['eta']
except:
print("Check parameters and paths.")
def check(self):
""" Checks parameters and paths
"""
super(specfem3d_globe, self).check()
if 'CHANNELS' not in PAR:
setattr(PAR, 'CHANNELS', 'ENZ')
# check data format
if 'FORMAT' not in PAR:
raise Exception()
def generate_data(self, **model_kwargs):
""" Generates data
"""
self.generate_mesh(**model_kwargs)
unix.cd(self.cwd)
setpar('SIMULATION_TYPE', '1')
setpar('SAVE_FORWARD', '.true.')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
if PAR.FORMAT in ['ASCII', 'ascii']:
src = glob('OUTPUT_FILES/*.sem.ascii')
dst = 'traces/obs'
unix.mv(src, dst)
if PAR.SAVETRACES:
self.export_traces(PATH.OUTPUT+'/'+'traces/obs')
def generate_mesh(self, model_path=None, model_name=None,
model_type='gll'):
""" Performs meshing and database generation
"""
assert(model_name)
assert(model_type)
self.initialize_solver_directories()
unix.cd(self.cwd)
if model_type == 'gll':
assert (exists(model_path))
self.check_mesh_properties(model_path)
unix.cp(glob(model_path + '/' + '*'), self.model_databases)
call_solver(system.mpiexec(), 'bin/xmeshfem3D')
if self.taskid == 0:
self.export_model(PATH.OUTPUT + '/' + model_name)
else:
raise NotImplementedError
# Model input/output
def load(self, path, prefix='reg1_', suffix='', verbose=False):
""" reads SPECFEM model or kernel
Models are stored in Fortran binary format and separated into
multiple files according to material parameter and processor rank.
"""
raise NotImplementedError
model = Model(self.parameters)
minmax = Minmax(self.parameters)
for iproc in range(self.mesh_properties.nproc):
# read database files
keys, vals = loadbypar(path, self.parameters, iproc, prefix,
suffix)
for key, val in zip(keys, vals):
model[key] += [val]
minmax.update(keys, vals)
if verbose:
minmax.write(path, logpath=PATH.SUBMIT)
return model
def save(self, path, model, prefix='reg1_', suffix=''):
""" writes SPECFEM3D_GLOBE transerverly isotropic model
"""
unix.mkdir(path)
for iproc in range(self.mesh_properties.nproc):
for key in ['vpv', 'vph', 'vsv', 'vsh', 'eta']:
if key in self.parameters:
savebin(model[key][iproc], path, iproc, prefix+key+suffix)
elif 'kernel' in suffix:
pass
else:
src = PATH.OUTPUT + '/' + 'model_init'
dst = path
copybin(src, dst, iproc, prefix+key+suffix)
if 'rho' in self.parameters:
savebin(model['rho'][iproc], path, iproc, prefix+'rho'+suffix)
elif 'kernel' in suffix:
pass
else:
src = PATH.OUTPUT + '/' + 'model_init'
dst = path
copybin(src, dst, iproc, prefix+'rho'+suffix)
# Low-level solver interface
def forward(self, path='traces/syn'):
""" Calls SPECFEM3D_GLOBE forward solver
"""
solvertools.setpar('SIMULATION_TYPE', '1')
solvertools.setpar('SAVE_FORWARD', '.true.')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
if PAR.FORMAT in ['ASCII', 'ascii']:
src = glob('OUTPUT_FILES/*.sem.ascii')
dst = path
unix.mv(src, dst)
def adjoint(self):
""" Calls SPECFEM3D_GLOBE adjoint solver
"""
solvertools.setpar('SIMULATION_TYPE', '3')
solvertools.setpar('SAVE_FORWARD', '.false.')
unix.rm('SEM')
unix.ln('traces/adj', 'SEM')
call_solver(system.mpiexec(), 'bin/xspecfem3D')
def check_mesh_properties(self, path=None, parameters=None):
if not hasattr(self, '_mesh_properties'):
if not path:
path = PATH.MODEL_INIT
if not parameters:
parameters = self.parameters
nproc = 0
ngll = []
while True:
dummy = loadbin(path, nproc, 'reg1_'+parameters[0])
ngll += [len(dummy)]
nproc += 1
if not exists('%s/proc%06d_reg1_%s.bin' % (path, nproc,
parameters[0])):
break
self._mesh_properties = Struct([
['nproc', nproc],
['ngll', ngll]])
return self._mesh_properties
def rename_data(self):
""" Works around conflicting data filename conventions
"""
files = glob(self.cwd + '/' + 'traces/adj/*sem.ascii')
unix.rename('sem.ascii', 'sem.ascii.adj', files)
def initialize_adjoint_traces(self):
super(specfem3d_globe, self).initialize_adjoint_traces()
        # workaround for SPECFEM3D_GLOBE's use of different name conventions for
        # regular traces and 'adjoint' traces
if PAR.FORMAT in ['ASCII', 'ascii']:
files = glob(self.cwd + '/' + 'traces/adj/*sem.ascii')
unix.rename('sem.ascii', 'adj', files)
# Miscellaneous
@property
def data_filenames(self):
unix.cd(self.cwd)
unix.cd('traces/obs')
print('made it here')
if PAR.FORMAT in ['ASCII', 'ascii']:
filenames = []
for channel in PAR.CHANNELS:
filenames += glob('*.??%s.sem.ascii' % channel)
return [filenames]
@property
def kernel_databases(self):
return join(self.cwd, 'OUTPUT_FILES/DATABASES_MPI')
@property
def model_databases(self):
return join(self.cwd, 'OUTPUT_FILES/DATABASES_MPI')
@property
def source_prefix(self):
return 'CMTSOLUTION'
|
py | 1a49388d8124395680c44ed00c41aa89df03a7df | """
DataFrame
---------
An efficient 2D container for potentially mixed-type time series or other
labeled data series.
Similar to its R counterpart, data.frame, except providing automatic data
alignment and a host of useful data manipulation methods having to do with the
labeling information
"""
import collections
from collections import abc
from io import StringIO
import itertools
import sys
from textwrap import dedent
from typing import (
IO,
TYPE_CHECKING,
Any,
FrozenSet,
Hashable,
Iterable,
List,
Optional,
Sequence,
Set,
Tuple,
Type,
Union,
cast,
)
import warnings
import numpy as np
import numpy.ma as ma
from pandas._config import get_option
from pandas._libs import algos as libalgos, lib
from pandas._typing import Axes, Axis, Dtype, FilePathOrBuffer, Level, Renamer
from pandas.compat import PY37
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.util._decorators import (
Appender,
Substitution,
deprecate_kwarg,
rewrite_axis_style_signature,
)
from pandas.util._validators import (
validate_axis_style_args,
validate_bool_kwarg,
validate_percentile,
)
from pandas.core.dtypes.cast import (
cast_scalar_to_array,
coerce_to_dtypes,
find_common_type,
infer_dtype_from_scalar,
invalidate_string_dtypes,
maybe_cast_to_datetime,
maybe_convert_platform,
maybe_downcast_to_dtype,
maybe_infer_to_datetimelike,
maybe_upcast,
maybe_upcast_putmask,
)
from pandas.core.dtypes.common import (
ensure_float64,
ensure_int64,
ensure_platform_int,
infer_dtype_from_object,
is_bool_dtype,
is_dict_like,
is_dtype_equal,
is_extension_array_dtype,
is_float_dtype,
is_hashable,
is_integer,
is_integer_dtype,
is_iterator,
is_list_like,
is_named_tuple,
is_object_dtype,
is_scalar,
is_sequence,
needs_i8_conversion,
)
from pandas.core.dtypes.generic import (
ABCDataFrame,
ABCIndexClass,
ABCMultiIndex,
ABCSeries,
)
from pandas.core.dtypes.missing import isna, notna
from pandas.core import algorithms, common as com, nanops, ops
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import Categorical, ExtensionArray
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin as DatetimeLikeArray
from pandas.core.arrays.sparse import SparseFrameAccessor
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.indexes import base as ibase
from pandas.core.indexes.api import Index, ensure_index, ensure_index_from_sequences
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.multi import maybe_droplevels
from pandas.core.indexes.period import PeriodIndex
from pandas.core.indexing import check_bool_indexer, convert_to_index_sliceable
from pandas.core.internals import BlockManager
from pandas.core.internals.construction import (
arrays_to_mgr,
get_names_from_index,
init_dict,
init_ndarray,
masked_rec_array_to_mgr,
reorder_arrays,
sanitize_index,
to_arrays,
)
from pandas.core.ops.missing import dispatch_fill_zeros
from pandas.core.series import Series
from pandas.io.common import get_filepath_or_buffer
from pandas.io.formats import console, format as fmt
from pandas.io.formats.printing import pprint_thing
import pandas.plotting
if TYPE_CHECKING:
from pandas.core.groupby.generic import DataFrameGroupBy
from pandas.io.formats.style import Styler
# ---------------------------------------------------------------------
# Docstring templates
_shared_doc_kwargs = dict(
axes="index, columns",
klass="DataFrame",
axes_single_arg="{0 or 'index', 1 or 'columns'}",
axis="""axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index': apply function to each column.
If 1 or 'columns': apply function to each row.""",
optional_by="""
by : str or list of str
Name or list of names to sort by.
- if `axis` is 0 or `'index'` then `by` may contain index
levels and/or column labels.
- if `axis` is 1 or `'columns'` then `by` may contain column
levels and/or index labels.
.. versionchanged:: 0.23.0
Allow specifying index or column level names.""",
versionadded_to_excel="",
optional_labels="""labels : array-like, optional
New labels / index to conform the axis specified by 'axis' to.""",
optional_axis="""axis : int or str, optional
Axis to target. Can be either the axis name ('index', 'columns')
or number (0, 1).""",
)
_numeric_only_doc = """numeric_only : boolean, default None
Include only float, int, boolean data. If None, will attempt to use
everything, then use only numeric data
"""
_merge_doc = """
Merge DataFrame or named Series objects with a database-style join.
The join is done on columns or indexes. If joining columns on
columns, the DataFrame indexes *will be ignored*. Otherwise if joining indexes
on indexes or indexes on a column or columns, the index will be passed on.
Parameters
----------%s
right : DataFrame or named Series
Object to merge with.
how : {'left', 'right', 'outer', 'inner'}, default 'inner'
Type of merge to be performed.
* left: use only keys from left frame, similar to a SQL left outer join;
preserve key order.
* right: use only keys from right frame, similar to a SQL right outer join;
preserve key order.
* outer: use union of keys from both frames, similar to a SQL full outer
join; sort keys lexicographically.
* inner: use intersection of keys from both frames, similar to a SQL inner
join; preserve the order of the left keys.
on : label or list
Column or index level names to join on. These must be found in both
DataFrames. If `on` is None and not merging on indexes then this defaults
to the intersection of the columns in both DataFrames.
left_on : label or list, or array-like
Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on : label or list, or array-like
Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index : bool, default False
Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index
or a number of columns) must match the number of levels.
right_index : bool, default False
Use the index from the right DataFrame as the join key. Same caveats as
left_index.
sort : bool, default False
Sort the join keys lexicographically in the result DataFrame. If False,
the order of the join keys depends on the join type (how keyword).
suffixes : tuple of (str, str), default ('_x', '_y')
Suffix to apply to overlapping column names in the left and right
side, respectively. To raise an exception on overlapping columns use
(False, False).
copy : bool, default True
If False, avoid copy if possible.
indicator : bool or str, default False
If True, adds a column to output DataFrame called "_merge" with
information on the source of each row.
If string, column with information on source of each row will be added to
output DataFrame, and column will be named value of string.
Information column is Categorical-type and takes on a value of "left_only"
for observations whose merge key only appears in 'left' DataFrame,
"right_only" for observations whose merge key only appears in 'right'
DataFrame, and "both" if the observation's merge key is found in both.
validate : str, optional
If specified, checks if merge is of specified type.
* "one_to_one" or "1:1": check if merge keys are unique in both
left and right datasets.
* "one_to_many" or "1:m": check if merge keys are unique in left
dataset.
* "many_to_one" or "m:1": check if merge keys are unique in right
dataset.
* "many_to_many" or "m:m": allowed, but does not result in checks.
.. versionadded:: 0.21.0
Returns
-------
DataFrame
A DataFrame of the two merged objects.
See Also
--------
merge_ordered : Merge with optional filling/interpolation.
merge_asof : Merge on nearest keys.
DataFrame.join : Similar method using indices.
Notes
-----
Support for specifying index levels as the `on`, `left_on`, and
`right_on` parameters was added in version 0.23.0
Support for merging named Series objects was added in version 0.24.0
Examples
--------
>>> df1 = pd.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]})
>>> df2 = pd.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]})
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> df1.merge(df2, left_on='lkey', right_on='rkey')
lkey value_x rkey value_y
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2 with specified left and right suffixes
appended to any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey',
... suffixes=('_left', '_right'))
lkey value_left rkey value_right
0 foo 1 foo 5
1 foo 1 foo 8
2 foo 5 foo 5
3 foo 5 foo 8
4 bar 2 bar 6
5 baz 3 baz 7
Merge DataFrames df1 and df2, but raise an exception if the DataFrames have
any overlapping columns.
>>> df1.merge(df2, left_on='lkey', right_on='rkey', suffixes=(False, False))
Traceback (most recent call last):
...
ValueError: columns overlap but no suffix specified:
Index(['value'], dtype='object')
"""
# -----------------------------------------------------------------------
# DataFrame class
class DataFrame(NDFrame):
"""
Two-dimensional, size-mutable, potentially heterogeneous tabular data.
Data structure also contains labeled axes (rows and columns).
Arithmetic operations align on both row and column labels. Can be
thought of as a dict-like container for Series objects. The primary
pandas data structure.
Parameters
----------
data : ndarray (structured or homogeneous), Iterable, dict, or DataFrame
Dict can contain Series, arrays, constants, or list-like objects.
.. versionchanged:: 0.23.0
If data is a dict, column order follows insertion-order for
Python 3.6 and later.
.. versionchanged:: 0.25.0
If data is a list of dicts, column order follows insertion-order
for Python 3.6 and later.
index : Index or array-like
Index to use for resulting frame. Will default to RangeIndex if
no indexing information part of input data and no index provided.
columns : Index or array-like
Column labels to use for resulting frame. Will default to
RangeIndex (0, 1, 2, ..., n) if no column labels are provided.
dtype : dtype, default None
Data type to force. Only a single dtype is allowed. If None, infer.
copy : bool, default False
Copy data from inputs. Only affects DataFrame / 2d ndarray input.
See Also
--------
DataFrame.from_records : Constructor from tuples, also record arrays.
DataFrame.from_dict : From dicts of Series, arrays, or dicts.
read_csv
read_table
read_clipboard
Examples
--------
Constructing DataFrame from a dictionary.
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df
col1 col2
0 1 3
1 2 4
Notice that the inferred dtype is int64.
>>> df.dtypes
col1 int64
col2 int64
dtype: object
To enforce a single dtype:
>>> df = pd.DataFrame(data=d, dtype=np.int8)
>>> df.dtypes
col1 int8
col2 int8
dtype: object
Constructing DataFrame from numpy ndarray:
>>> df2 = pd.DataFrame(np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
... columns=['a', 'b', 'c'])
>>> df2
a b c
0 1 2 3
1 4 5 6
2 7 8 9
"""
_typ = "dataframe"
@property
def _constructor(self) -> Type["DataFrame"]:
return DataFrame
_constructor_sliced: Type[Series] = Series
_deprecations: FrozenSet[str] = NDFrame._deprecations | frozenset([])
_accessors: Set[str] = {"sparse"}
@property
def _constructor_expanddim(self):
raise NotImplementedError("Not supported for DataFrames!")
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data=None,
index: Optional[Axes] = None,
columns: Optional[Axes] = None,
dtype: Optional[Dtype] = None,
copy: bool = False,
):
if data is None:
data = {}
if dtype is not None:
dtype = self._validate_dtype(dtype)
if isinstance(data, DataFrame):
data = data._data
if isinstance(data, BlockManager):
mgr = self._init_mgr(
data, axes=dict(index=index, columns=columns), dtype=dtype, copy=copy
)
elif isinstance(data, dict):
mgr = init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, ma.MaskedArray):
import numpy.ma.mrecords as mrecords
# masked recarray
if isinstance(data, mrecords.MaskedRecords):
mgr = masked_rec_array_to_mgr(data, index, columns, dtype, copy)
# a masked array
else:
mask = ma.getmaskarray(data)
if mask.any():
data, fill_value = maybe_upcast(data, copy=True)
data.soften_mask() # set hardmask False if it was True
data[mask] = fill_value
else:
data = data.copy()
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
elif isinstance(data, (np.ndarray, Series, Index)):
if data.dtype.names:
data_columns = list(data.dtype.names)
data = {k: data[k] for k in data_columns}
if columns is None:
columns = data_columns
mgr = init_dict(data, index, columns, dtype=dtype)
elif getattr(data, "name", None) is not None:
mgr = init_dict({data.name: data}, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
# For data is list-like, or Iterable (will consume into list)
elif isinstance(data, abc.Iterable) and not isinstance(data, (str, bytes)):
if not isinstance(data, (abc.Sequence, ExtensionArray)):
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], "ndim", 1) == 1:
if is_named_tuple(data[0]) and columns is None:
columns = data[0]._fields
arrays, columns = to_arrays(data, columns, dtype=dtype)
columns = ensure_index(columns)
# set the index
if index is None:
if isinstance(data[0], Series):
index = get_names_from_index(data)
elif isinstance(data[0], Categorical):
index = ibase.default_index(len(data[0]))
else:
index = ibase.default_index(len(data))
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
else:
mgr = init_ndarray(data, index, columns, dtype=dtype, copy=copy)
else:
mgr = init_dict({}, index, columns, dtype=dtype)
else:
try:
arr = np.array(data, dtype=dtype, copy=copy)
except (ValueError, TypeError) as e:
exc = TypeError(
"DataFrame constructor called with "
f"incompatible data and dtype: {e}"
)
raise exc from e
if arr.ndim == 0 and index is not None and columns is not None:
values = cast_scalar_to_array(
(len(index), len(columns)), data, dtype=dtype
)
mgr = init_ndarray(
values, index, columns, dtype=values.dtype, copy=False
)
else:
raise ValueError("DataFrame constructor not properly called!")
NDFrame.__init__(self, mgr, fastpath=True)
# ----------------------------------------------------------------------
@property
def axes(self) -> List[Index]:
"""
Return a list representing the axes of the DataFrame.
It has the row axis labels and column axis labels as the only members.
They are returned in that order.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.axes
[RangeIndex(start=0, stop=2, step=1), Index(['col1', 'col2'],
dtype='object')]
"""
return [self.index, self.columns]
@property
def shape(self) -> Tuple[int, int]:
"""
Return a tuple representing the dimensionality of the DataFrame.
See Also
--------
ndarray.shape
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.shape
(2, 2)
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4],
... 'col3': [5, 6]})
>>> df.shape
(2, 3)
"""
return len(self.index), len(self.columns)
@property
def _is_homogeneous_type(self) -> bool:
"""
Whether all the columns in a DataFrame have the same type.
Returns
-------
bool
See Also
--------
Index._is_homogeneous_type : Whether the object has a single
dtype.
MultiIndex._is_homogeneous_type : Whether all the levels of a
MultiIndex have the same dtype.
Examples
--------
>>> DataFrame({"A": [1, 2], "B": [3, 4]})._is_homogeneous_type
True
>>> DataFrame({"A": [1, 2], "B": [3.0, 4.0]})._is_homogeneous_type
False
Items with the same type but different sizes are considered
different types.
>>> DataFrame({
... "A": np.array([1, 2], dtype=np.int32),
... "B": np.array([1, 2], dtype=np.int64)})._is_homogeneous_type
False
"""
if self._data.any_extension_types:
return len({block.dtype for block in self._data.blocks}) == 1
else:
return not self._data.is_mixed_type
# ----------------------------------------------------------------------
# Rendering Methods
def _repr_fits_vertical_(self) -> bool:
"""
Check length against max_rows.
"""
max_rows = get_option("display.max_rows")
return len(self) <= max_rows
def _repr_fits_horizontal_(self, ignore_width: bool = False) -> bool:
"""
Check if full repr fits in horizontal boundaries imposed by the display
options width and max_columns.
        In case of a non-interactive session, no boundaries apply.
        `ignore_width` is here so ipynb+HTML output can behave the way
users expect. display.max_columns remains in effect.
GH3541, GH3573
"""
width, height = console.get_console_size()
max_columns = get_option("display.max_columns")
nb_columns = len(self.columns)
# exceed max columns
if (max_columns and nb_columns > max_columns) or (
(not ignore_width) and width and nb_columns > (width // 2)
):
return False
# used by repr_html under IPython notebook or scripts ignore terminal
# dims
if ignore_width or not console.in_interactive_session():
return True
if get_option("display.width") is not None or console.in_ipython_frontend():
# check at least the column row for excessive width
max_rows = 1
else:
max_rows = get_option("display.max_rows")
# when auto-detecting, so width=None and not in ipython front end
# check whether repr fits horizontal by actually checking
# the width of the rendered repr
buf = StringIO()
# only care about the stuff we'll actually print out
# and to_string on entire frame may be expensive
d = self
if not (max_rows is None): # unlimited rows
# min of two, where one may be None
d = d.iloc[: min(max_rows, len(d))]
else:
return True
d.to_string(buf=buf)
value = buf.getvalue()
repr_width = max(len(l) for l in value.split("\n"))
return repr_width < width
def _info_repr(self) -> bool:
"""
True if the repr should show the info view.
"""
info_repr_option = get_option("display.large_repr") == "info"
return info_repr_option and not (
self._repr_fits_horizontal_() and self._repr_fits_vertical_()
)
def __repr__(self) -> str:
"""
Return a string representation for a particular DataFrame.
"""
buf = StringIO("")
if self._info_repr():
self.info(buf=buf)
return buf.getvalue()
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
max_colwidth = get_option("display.max_colwidth")
show_dimensions = get_option("display.show_dimensions")
if get_option("display.expand_frame_repr"):
width, _ = console.get_console_size()
else:
width = None
self.to_string(
buf=buf,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
line_width=width,
max_colwidth=max_colwidth,
show_dimensions=show_dimensions,
)
return buf.getvalue()
def _repr_html_(self) -> Optional[str]:
"""
Return a html representation for a particular DataFrame.
Mainly for IPython notebook.
"""
if self._info_repr():
buf = StringIO("")
self.info(buf=buf)
# need to escape the <class>, should be the first line.
val = buf.getvalue().replace("<", r"<", 1)
val = val.replace(">", r">", 1)
return "<pre>" + val + "</pre>"
if get_option("display.notebook_repr_html"):
max_rows = get_option("display.max_rows")
min_rows = get_option("display.min_rows")
max_cols = get_option("display.max_columns")
show_dimensions = get_option("display.show_dimensions")
formatter = fmt.DataFrameFormatter(
self,
columns=None,
col_space=None,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
justify=None,
index_names=True,
header=True,
index=True,
bold_rows=True,
escape=True,
max_rows=max_rows,
min_rows=min_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=".",
table_id=None,
render_links=False,
)
return formatter.to_html(notebook=True)
else:
return None
@Substitution(
header_type="bool or sequence",
header="Write out the column names. If a list of strings "
"is given, it is assumed to be aliases for the "
"column names",
col_space_type="int",
col_space="The minimum width of each column",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_string(
self,
buf: Optional[FilePathOrBuffer[str]] = None,
columns: Optional[Sequence[str]] = None,
col_space: Optional[int] = None,
header: Union[bool, Sequence[str]] = True,
index: bool = True,
na_rep: str = "NaN",
formatters: Optional[fmt.formatters_type] = None,
float_format: Optional[fmt.float_format_type] = None,
sparsify: Optional[bool] = None,
index_names: bool = True,
justify: Optional[str] = None,
max_rows: Optional[int] = None,
min_rows: Optional[int] = None,
max_cols: Optional[int] = None,
show_dimensions: bool = False,
decimal: str = ".",
line_width: Optional[int] = None,
max_colwidth: Optional[int] = None,
encoding: Optional[str] = None,
) -> Optional[str]:
"""
Render a DataFrame to a console-friendly tabular output.
%(shared_params)s
line_width : int, optional
Width to wrap a line in characters.
max_colwidth : int, optional
Max width to truncate each column in characters. By default, no limit.
.. versionadded:: 1.0.0
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
%(returns)s
See Also
--------
to_html : Convert DataFrame to HTML.
Examples
--------
>>> d = {'col1': [1, 2, 3], 'col2': [4, 5, 6]}
>>> df = pd.DataFrame(d)
>>> print(df.to_string())
col1 col2
0 1 4
1 2 5
2 3 6
"""
from pandas import option_context
with option_context("display.max_colwidth", max_colwidth):
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
min_rows=min_rows,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
line_width=line_width,
)
return formatter.to_string(buf=buf, encoding=encoding)
# ----------------------------------------------------------------------
@property
def style(self) -> "Styler":
"""
Returns a Styler object.
        Contains methods for building a styled HTML representation of the DataFrame.
See Also
--------
io.formats.style.Styler
"""
from pandas.io.formats.style import Styler
return Styler(self)
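    # Illustrative sketch (not part of pandas itself): the returned Styler
    # exposes chainable formatting helpers. ``highlight_max``, ``format`` and
    # ``render`` are existing Styler methods; the frame below is hypothetical.
    # >>> df = pd.DataFrame({"a": [1.0, 2.0], "b": [3.0, 4.0]})
    # >>> html = df.style.highlight_max().format("{:.2f}").render()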
_shared_docs[
"items"
] = r"""
Iterate over (column name, Series) pairs.
Iterates over the DataFrame columns, returning a tuple with
the column name and the content as a Series.
Yields
------
label : object
The column names for the DataFrame being iterated over.
content : Series
The column entries belonging to each label, as a Series.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as
(index, Series) pairs.
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples
of the values.
Examples
--------
>>> df = pd.DataFrame({'species': ['bear', 'bear', 'marsupial'],
... 'population': [1864, 22000, 80000]},
... index=['panda', 'polar', 'koala'])
>>> df
species population
panda bear 1864
polar bear 22000
koala marsupial 80000
>>> for label, content in df.items():
... print('label:', label)
... print('content:', content, sep='\n')
...
label: species
content:
panda bear
polar bear
koala marsupial
Name: species, dtype: object
label: population
content:
panda 1864
polar 22000
koala 80000
Name: population, dtype: int64
"""
@Appender(_shared_docs["items"])
def items(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
if self.columns.is_unique and hasattr(self, "_item_cache"):
for k in self.columns:
yield k, self._get_item_cache(k)
else:
for i, k in enumerate(self.columns):
yield k, self._ixs(i, axis=1)
@Appender(_shared_docs["items"])
def iteritems(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
yield from self.items()
def iterrows(self) -> Iterable[Tuple[Optional[Hashable], Series]]:
"""
Iterate over DataFrame rows as (index, Series) pairs.
Yields
------
index : label or tuple of label
The index of the row. A tuple for a `MultiIndex`.
data : Series
The data of the row as a Series.
See Also
--------
DataFrame.itertuples : Iterate over DataFrame rows as namedtuples of the values.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
1. Because ``iterrows`` returns a Series for each row,
it does **not** preserve dtypes across the rows (dtypes are
preserved across columns for DataFrames). For example,
>>> df = pd.DataFrame([[1, 1.5]], columns=['int', 'float'])
>>> row = next(df.iterrows())[1]
>>> row
int 1.0
float 1.5
Name: 0, dtype: float64
>>> print(row['int'].dtype)
float64
>>> print(df['int'].dtype)
int64
To preserve dtypes while iterating over the rows, it is better
to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster than ``iterrows``.
2. You should **never modify** something you are iterating over.
This is not guaranteed to work in all cases. Depending on the
data types, the iterator returns a copy and not a view, and writing
to it will have no effect.
"""
columns = self.columns
klass = self._constructor_sliced
for k, v in zip(self.index, self.values):
s = klass(v, index=columns, name=k)
yield k, s
def itertuples(self, index=True, name="Pandas"):
"""
Iterate over DataFrame rows as namedtuples.
Parameters
----------
index : bool, default True
If True, return the index as the first element of the tuple.
name : str or None, default "Pandas"
The name of the returned namedtuples or None to return regular
tuples.
Returns
-------
iterator
An object to iterate over namedtuples for each row in the
DataFrame with the first field possibly being the index and
following fields being the column values.
See Also
--------
DataFrame.iterrows : Iterate over DataFrame rows as (index, Series)
pairs.
DataFrame.items : Iterate over (column name, Series) pairs.
Notes
-----
The column names will be renamed to positional names if they are
invalid Python identifiers, repeated, or start with an underscore.
        On Python versions < 3.7 regular tuples are returned for DataFrames
with a large number of columns (>254).
Examples
--------
>>> df = pd.DataFrame({'num_legs': [4, 2], 'num_wings': [0, 2]},
... index=['dog', 'hawk'])
>>> df
num_legs num_wings
dog 4 0
hawk 2 2
>>> for row in df.itertuples():
... print(row)
...
Pandas(Index='dog', num_legs=4, num_wings=0)
Pandas(Index='hawk', num_legs=2, num_wings=2)
By setting the `index` parameter to False we can remove the index
as the first element of the tuple:
>>> for row in df.itertuples(index=False):
... print(row)
...
Pandas(num_legs=4, num_wings=0)
Pandas(num_legs=2, num_wings=2)
With the `name` parameter set we set a custom name for the yielded
namedtuples:
>>> for row in df.itertuples(name='Animal'):
... print(row)
...
Animal(Index='dog', num_legs=4, num_wings=0)
Animal(Index='hawk', num_legs=2, num_wings=2)
"""
arrays = []
fields = list(self.columns)
if index:
arrays.append(self.index)
fields.insert(0, "Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
# Python versions before 3.7 support at most 255 arguments to constructors
can_return_named_tuples = PY37 or len(self.columns) + index < 255
if name is not None and can_return_named_tuples:
itertuple = collections.namedtuple(name, fields, rename=True)
return map(itertuple._make, zip(*arrays))
# fallback to regular tuples
return zip(*arrays)
def __len__(self) -> int:
"""
Returns length of info axis, but here we use the index.
"""
return len(self.index)
def dot(self, other):
"""
Compute the matrix multiplication between the DataFrame and other.
This method computes the matrix product between the DataFrame and the
values of an other Series, DataFrame or a numpy array.
It can also be called using ``self @ other`` in Python >= 3.5.
Parameters
----------
other : Series, DataFrame or array-like
The other object to compute the matrix product with.
Returns
-------
Series or DataFrame
            If other is a Series, return the matrix product between self and
            other as a Series. If other is a DataFrame or a numpy.array, return
            the matrix product of self and other in a DataFrame or a np.array.
See Also
--------
Series.dot: Similar method for Series.
Notes
-----
The dimensions of DataFrame and other must be compatible in order to
compute the matrix multiplication. In addition, the column names of
DataFrame and the index of other must contain the same values, as they
will be aligned prior to the multiplication.
The dot method for Series computes the inner product, instead of the
matrix product here.
Examples
--------
Here we multiply a DataFrame with a Series.
>>> df = pd.DataFrame([[0, 1, -2, -1], [1, 1, 1, 1]])
>>> s = pd.Series([1, 1, 2, 1])
>>> df.dot(s)
0 -4
1 5
dtype: int64
Here we multiply a DataFrame with another DataFrame.
>>> other = pd.DataFrame([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(other)
0 1
0 1 4
1 2 2
        Note that the dot method gives the same result as @
>>> df @ other
0 1
0 1 4
1 2 2
The dot method works also if other is an np.array.
>>> arr = np.array([[0, 1], [1, 2], [-1, -1], [2, 0]])
>>> df.dot(arr)
0 1
0 1 4
1 2 2
Note how shuffling of the objects does not change the result.
>>> s2 = s.reindex([1, 0, 2, 3])
>>> df.dot(s2)
0 -4
1 5
dtype: int64
"""
if isinstance(other, (Series, DataFrame)):
common = self.columns.union(other.index)
if len(common) > len(self.columns) or len(common) > len(other.index):
raise ValueError("matrices are not aligned")
left = self.reindex(columns=common, copy=False)
right = other.reindex(index=common, copy=False)
lvals = left.values
rvals = right.values
else:
left = self
lvals = self.values
rvals = np.asarray(other)
if lvals.shape[1] != rvals.shape[0]:
raise ValueError(
f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}"
)
if isinstance(other, DataFrame):
return self._constructor(
np.dot(lvals, rvals), index=left.index, columns=other.columns
)
elif isinstance(other, Series):
return Series(np.dot(lvals, rvals), index=left.index)
elif isinstance(rvals, (np.ndarray, Index)):
result = np.dot(lvals, rvals)
if result.ndim == 2:
return self._constructor(result, index=left.index)
else:
return Series(result, index=left.index)
else: # pragma: no cover
raise TypeError(f"unsupported type: {type(other)}")
def __matmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.dot(other)
def __rmatmul__(self, other):
"""
Matrix multiplication using binary `@` operator in Python>=3.5.
"""
return self.T.dot(np.transpose(other)).T
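    # Usage sketch (illustrative): ``@`` dispatches to ``dot``. ``__rmatmul__``
    # covers the case where the left operand is a plain ndarray; the frame and
    # array below are hypothetical.
    # >>> df = pd.DataFrame([[0, 1], [1, 2]])
    # >>> arr = np.array([[1, 0], [0, 1]])
    # >>> df @ arr    # df.__matmul__(arr) -> df.dot(arr)
    # >>> arr @ df    # falls through to df.__rmatmul__(arr)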
# ----------------------------------------------------------------------
# IO methods (to / from other formats)
@classmethod
def from_dict(cls, data, orient="columns", dtype=None, columns=None) -> "DataFrame":
"""
Construct DataFrame from dict of array-like or dicts.
Creates DataFrame object from dictionary by columns or by index
allowing dtype specification.
Parameters
----------
data : dict
Of the form {field : array-like} or {field : dict}.
orient : {'columns', 'index'}, default 'columns'
The "orientation" of the data. If the keys of the passed dict
should be the columns of the resulting DataFrame, pass 'columns'
(default). Otherwise if the keys should be rows, pass 'index'.
dtype : dtype, default None
Data type to force, otherwise infer.
columns : list, default None
Column labels to use when ``orient='index'``. Raises a ValueError
if used with ``orient='columns'``.
.. versionadded:: 0.23.0
Returns
-------
DataFrame
See Also
--------
DataFrame.from_records : DataFrame from ndarray (structured
dtype), list of tuples, dict, or DataFrame.
DataFrame : DataFrame object creation using constructor.
Examples
--------
By default the keys of the dict become the DataFrame columns:
>>> data = {'col_1': [3, 2, 1, 0], 'col_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data)
col_1 col_2
0 3 a
1 2 b
2 1 c
3 0 d
Specify ``orient='index'`` to create the DataFrame using dictionary
keys as rows:
>>> data = {'row_1': [3, 2, 1, 0], 'row_2': ['a', 'b', 'c', 'd']}
>>> pd.DataFrame.from_dict(data, orient='index')
0 1 2 3
row_1 3 2 1 0
row_2 a b c d
When using the 'index' orientation, the column names can be
specified manually:
>>> pd.DataFrame.from_dict(data, orient='index',
... columns=['A', 'B', 'C', 'D'])
A B C D
row_1 3 2 1 0
row_2 a b c d
"""
index = None
orient = orient.lower()
if orient == "index":
if len(data) > 0:
# TODO speed up Series case
if isinstance(list(data.values())[0], (Series, dict)):
data = _from_nested_dict(data)
else:
data, index = list(data.values()), list(data.keys())
elif orient == "columns":
if columns is not None:
raise ValueError("cannot use columns parameter with orient='columns'")
else: # pragma: no cover
raise ValueError("only recognize index or columns for orient")
return cls(data, index=index, columns=columns, dtype=dtype)
def to_numpy(self, dtype=None, copy=False) -> np.ndarray:
"""
Convert the DataFrame to a NumPy array.
.. versionadded:: 0.24.0
By default, the dtype of the returned array will be the common NumPy
dtype of all types in the DataFrame. For example, if the dtypes are
``float16`` and ``float32``, the results dtype will be ``float32``.
This may require copying data and coercing values, which may be
expensive.
Parameters
----------
dtype : str or numpy.dtype, optional
The dtype to pass to :meth:`numpy.asarray`.
copy : bool, default False
            Whether to ensure that the returned value is not a view on
            another array. Note that ``copy=False`` does not *ensure* that
            ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
            a copy is made, even if not strictly necessary.
Returns
-------
numpy.ndarray
See Also
--------
Series.to_numpy : Similar method for Series.
Examples
--------
>>> pd.DataFrame({"A": [1, 2], "B": [3, 4]}).to_numpy()
array([[1, 3],
[2, 4]])
With heterogeneous data, the lowest common type will have to
be used.
>>> df = pd.DataFrame({"A": [1, 2], "B": [3.0, 4.5]})
>>> df.to_numpy()
array([[1. , 3. ],
[2. , 4.5]])
For a mix of numeric and non-numeric types, the output array will
have object dtype.
>>> df['C'] = pd.date_range('2000', periods=2)
>>> df.to_numpy()
array([[1, 3.0, Timestamp('2000-01-01 00:00:00')],
[2, 4.5, Timestamp('2000-01-02 00:00:00')]], dtype=object)
"""
result = np.array(self.values, dtype=dtype, copy=copy)
return result
def to_dict(self, orient="dict", into=dict):
"""
Convert the DataFrame to a dictionary.
The type of the key-value pairs can be customized with the parameters
(see below).
Parameters
----------
orient : str {'dict', 'list', 'series', 'split', 'records', 'index'}
Determines the type of the values of the dictionary.
- 'dict' (default) : dict like {column -> {index -> value}}
- 'list' : dict like {column -> [values]}
- 'series' : dict like {column -> Series(values)}
- 'split' : dict like
{'index' -> [index], 'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
Abbreviations are allowed. `s` indicates `series` and `sp`
indicates `split`.
into : class, default dict
The collections.abc.Mapping subclass used for all Mappings
in the return value. Can be the actual class or an empty
instance of the mapping type you want. If you want a
collections.defaultdict, you must pass it initialized.
.. versionadded:: 0.21.0
Returns
-------
dict, list or collections.abc.Mapping
Return a collections.abc.Mapping object representing the DataFrame.
The resulting transformation depends on the `orient` parameter.
See Also
--------
DataFrame.from_dict: Create a DataFrame from a dictionary.
DataFrame.to_json: Convert a DataFrame to JSON format.
Examples
--------
>>> df = pd.DataFrame({'col1': [1, 2],
... 'col2': [0.5, 0.75]},
... index=['row1', 'row2'])
>>> df
col1 col2
row1 1 0.50
row2 2 0.75
>>> df.to_dict()
{'col1': {'row1': 1, 'row2': 2}, 'col2': {'row1': 0.5, 'row2': 0.75}}
You can specify the return orientation.
>>> df.to_dict('series')
{'col1': row1 1
row2 2
Name: col1, dtype: int64,
'col2': row1 0.50
row2 0.75
Name: col2, dtype: float64}
>>> df.to_dict('split')
{'index': ['row1', 'row2'], 'columns': ['col1', 'col2'],
'data': [[1, 0.5], [2, 0.75]]}
>>> df.to_dict('records')
[{'col1': 1, 'col2': 0.5}, {'col1': 2, 'col2': 0.75}]
>>> df.to_dict('index')
{'row1': {'col1': 1, 'col2': 0.5}, 'row2': {'col1': 2, 'col2': 0.75}}
You can also specify the mapping type.
>>> from collections import OrderedDict, defaultdict
>>> df.to_dict(into=OrderedDict)
OrderedDict([('col1', OrderedDict([('row1', 1), ('row2', 2)])),
('col2', OrderedDict([('row1', 0.5), ('row2', 0.75)]))])
If you want a `defaultdict`, you need to initialize it:
>>> dd = defaultdict(list)
>>> df.to_dict('records', into=dd)
[defaultdict(<class 'list'>, {'col1': 1, 'col2': 0.5}),
defaultdict(<class 'list'>, {'col1': 2, 'col2': 0.75})]
"""
if not self.columns.is_unique:
warnings.warn(
"DataFrame columns are not unique, some columns will be omitted.",
UserWarning,
stacklevel=2,
)
# GH16122
into_c = com.standardize_mapping(into)
if orient.lower().startswith("d"):
return into_c((k, v.to_dict(into)) for k, v in self.items())
elif orient.lower().startswith("l"):
return into_c((k, v.tolist()) for k, v in self.items())
elif orient.lower().startswith("sp"):
return into_c(
(
("index", self.index.tolist()),
("columns", self.columns.tolist()),
(
"data",
[
list(map(com.maybe_box_datetimelike, t))
for t in self.itertuples(index=False, name=None)
],
),
)
)
elif orient.lower().startswith("s"):
return into_c((k, com.maybe_box_datetimelike(v)) for k, v in self.items())
elif orient.lower().startswith("r"):
columns = self.columns.tolist()
rows = (
dict(zip(columns, row))
for row in self.itertuples(index=False, name=None)
)
return [
into_c((k, com.maybe_box_datetimelike(v)) for k, v in row.items())
for row in rows
]
elif orient.lower().startswith("i"):
if not self.index.is_unique:
raise ValueError("DataFrame index must be unique for orient='index'.")
return into_c(
(t[0], dict(zip(self.columns, t[1:])))
for t in self.itertuples(name=None)
)
else:
raise ValueError(f"orient '{orient}' not understood")
def to_gbq(
self,
destination_table,
project_id=None,
chunksize=None,
reauth=False,
if_exists="fail",
auth_local_webserver=False,
table_schema=None,
location=None,
progress_bar=True,
credentials=None,
) -> None:
"""
Write a DataFrame to a Google BigQuery table.
This function requires the `pandas-gbq package
<https://pandas-gbq.readthedocs.io>`__.
See the `How to authenticate with Google BigQuery
<https://pandas-gbq.readthedocs.io/en/latest/howto/authentication.html>`__
guide for authentication instructions.
Parameters
----------
destination_table : str
Name of table to be written, in the form ``dataset.tablename``.
project_id : str, optional
Google BigQuery Account project ID. Optional when available from
the environment.
chunksize : int, optional
Number of rows to be inserted in each chunk from the dataframe.
Set to ``None`` to load the whole dataframe at once.
reauth : bool, default False
Force Google BigQuery to re-authenticate the user. This is useful
if multiple accounts are used.
if_exists : str, default 'fail'
Behavior when the destination table exists. Value can be one of:
``'fail'``
If table exists raise pandas_gbq.gbq.TableCreationError.
``'replace'``
If table exists, drop it, recreate it, and insert data.
``'append'``
If table exists, insert data. Create if does not exist.
auth_local_webserver : bool, default False
Use the `local webserver flow`_ instead of the `console flow`_
when getting user credentials.
.. _local webserver flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_local_server
.. _console flow:
http://google-auth-oauthlib.readthedocs.io/en/latest/reference/google_auth_oauthlib.flow.html#google_auth_oauthlib.flow.InstalledAppFlow.run_console
*New in version 0.2.0 of pandas-gbq*.
table_schema : list of dicts, optional
            List of BigQuery table fields to which the DataFrame
            columns conform, e.g. ``[{'name': 'col1', 'type':
'STRING'},...]``. If schema is not provided, it will be
generated according to dtypes of DataFrame columns. See
BigQuery API documentation on available names of a field.
*New in version 0.3.1 of pandas-gbq*.
location : str, optional
Location where the load job should run. See the `BigQuery locations
documentation
<https://cloud.google.com/bigquery/docs/dataset-locations>`__ for a
list of available locations. The location must match that of the
target dataset.
*New in version 0.5.0 of pandas-gbq*.
progress_bar : bool, default True
Use the library `tqdm` to show the progress bar for the upload,
chunk by chunk.
*New in version 0.5.0 of pandas-gbq*.
credentials : google.auth.credentials.Credentials, optional
Credentials for accessing Google APIs. Use this parameter to
override default credentials, such as to use Compute Engine
:class:`google.auth.compute_engine.Credentials` or Service
Account :class:`google.oauth2.service_account.Credentials`
directly.
*New in version 0.8.0 of pandas-gbq*.
.. versionadded:: 0.24.0
See Also
--------
pandas_gbq.to_gbq : This function in the pandas-gbq library.
read_gbq : Read a DataFrame from Google BigQuery.
"""
from pandas.io import gbq
gbq.to_gbq(
self,
destination_table,
project_id=project_id,
chunksize=chunksize,
reauth=reauth,
if_exists=if_exists,
auth_local_webserver=auth_local_webserver,
table_schema=table_schema,
location=location,
progress_bar=progress_bar,
credentials=credentials,
)
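    # Usage sketch (illustrative, not executed here): writing a frame to a
    # BigQuery table via pandas-gbq. The project and table names are
    # hypothetical placeholders.
    # >>> df = pd.DataFrame({"col1": [1, 2], "col2": [3, 4]})
    # >>> df.to_gbq("my_dataset.my_table", project_id="my-project",
    # ...           if_exists="replace")  # doctest: +SKIP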
@classmethod
def from_records(
cls,
data,
index=None,
exclude=None,
columns=None,
coerce_float=False,
nrows=None,
) -> "DataFrame":
"""
Convert structured or record ndarray to DataFrame.
Parameters
----------
data : ndarray (structured dtype), list of tuples, dict, or DataFrame
index : str, list of fields, array-like
Field of array to use as the index, alternately a specific set of
input labels to use.
exclude : sequence, default None
Columns or fields to exclude.
columns : sequence, default None
Column names to use. If the passed data do not have names
associated with them, this argument provides names for the
columns. Otherwise this argument indicates the order of the columns
in the result (any names not found in the data will become all-NA
columns).
coerce_float : bool, default False
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
nrows : int, default None
Number of rows to read if data is an iterator.
Returns
-------
DataFrame
"""
# Make a copy of the input columns so we can modify it
if columns is not None:
columns = ensure_index(columns)
if is_iterator(data):
if nrows == 0:
return cls()
try:
first_row = next(data)
except StopIteration:
return cls(index=index, columns=columns)
dtype = None
if hasattr(first_row, "dtype") and first_row.dtype.names:
dtype = first_row.dtype
values = [first_row]
if nrows is None:
values += data
else:
values.extend(itertools.islice(data, nrows - 1))
if dtype is not None:
data = np.array(values, dtype=dtype)
else:
data = values
if isinstance(data, dict):
if columns is None:
columns = arr_columns = ensure_index(sorted(data))
arrays = [data[k] for k in columns]
else:
arrays = []
arr_columns = []
for k, v in data.items():
if k in columns:
arr_columns.append(k)
arrays.append(v)
arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns)
elif isinstance(data, (np.ndarray, DataFrame)):
arrays, columns = to_arrays(data, columns)
if columns is not None:
columns = ensure_index(columns)
arr_columns = columns
else:
arrays, arr_columns = to_arrays(data, columns, coerce_float=coerce_float)
arr_columns = ensure_index(arr_columns)
if columns is not None:
columns = ensure_index(columns)
else:
columns = arr_columns
if exclude is None:
exclude = set()
else:
exclude = set(exclude)
result_index = None
if index is not None:
if isinstance(index, str) or not hasattr(index, "__iter__"):
i = columns.get_loc(index)
exclude.add(index)
if len(arrays) > 0:
result_index = Index(arrays[i], name=index)
else:
result_index = Index([], name=index)
else:
try:
index_data = [arrays[arr_columns.get_loc(field)] for field in index]
except (KeyError, TypeError):
# raised by get_loc, see GH#29258
result_index = index
else:
result_index = ensure_index_from_sequences(index_data, names=index)
exclude.update(index)
if any(exclude):
arr_exclude = [x for x in exclude if x in arr_columns]
to_remove = [arr_columns.get_loc(col) for col in arr_exclude]
arrays = [v for i, v in enumerate(arrays) if i not in to_remove]
arr_columns = arr_columns.drop(arr_exclude)
columns = columns.drop(exclude)
mgr = arrays_to_mgr(arrays, arr_columns, result_index, columns)
return cls(mgr)
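    # Usage sketch (illustrative): constructing frames from a structured
    # ndarray and from a list of tuples; the data values are hypothetical.
    # >>> data = np.array([(1, 2.0, "a"), (3, 4.0, "b")],
    # ...                 dtype=[("x", "i4"), ("y", "f8"), ("z", "U1")])
    # >>> pd.DataFrame.from_records(data, index="x")
    # >>> pd.DataFrame.from_records([(1, "a"), (2, "b")],
    # ...                           columns=["num", "letter"])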
def to_records(
self, index=True, column_dtypes=None, index_dtypes=None
) -> np.recarray:
"""
Convert DataFrame to a NumPy record array.
Index will be included as the first field of the record array if
requested.
Parameters
----------
index : bool, default True
Include index in resulting record array, stored in 'index'
field or using the index label, if set.
column_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all columns. If
a dictionary, a mapping of column names and indices (zero-indexed)
to specific data types.
index_dtypes : str, type, dict, default None
.. versionadded:: 0.24.0
If a string or type, the data type to store all index levels. If
a dictionary, a mapping of index level names and indices
(zero-indexed) to specific data types.
This mapping is applied only if `index=True`.
Returns
-------
numpy.recarray
NumPy ndarray with the DataFrame labels as fields and each row
of the DataFrame as entries.
See Also
--------
DataFrame.from_records: Convert structured or record ndarray
to DataFrame.
numpy.recarray: An ndarray that allows field access using
attributes, analogous to typed columns in a
spreadsheet.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2], 'B': [0.5, 0.75]},
... index=['a', 'b'])
>>> df
A B
a 1 0.50
b 2 0.75
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('index', 'O'), ('A', '<i8'), ('B', '<f8')])
If the DataFrame index has no label then the recarray field name
is set to 'index'. If the index has a label then this is used as the
field name:
>>> df.index = df.index.rename("I")
>>> df.to_records()
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i8'), ('B', '<f8')])
The index can be excluded from the record array:
>>> df.to_records(index=False)
rec.array([(1, 0.5 ), (2, 0.75)],
dtype=[('A', '<i8'), ('B', '<f8')])
Data types can be specified for the columns:
>>> df.to_records(column_dtypes={"A": "int32"})
rec.array([('a', 1, 0.5 ), ('b', 2, 0.75)],
dtype=[('I', 'O'), ('A', '<i4'), ('B', '<f8')])
As well as for the index:
>>> df.to_records(index_dtypes="<S2")
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S2'), ('A', '<i8'), ('B', '<f8')])
>>> index_dtypes = f"<S{df.index.str.len().max()}"
>>> df.to_records(index_dtypes=index_dtypes)
rec.array([(b'a', 1, 0.5 ), (b'b', 2, 0.75)],
dtype=[('I', 'S1'), ('A', '<i8'), ('B', '<f8')])
"""
if index:
if isinstance(self.index, ABCMultiIndex):
                # convert the array of index tuples to numpy columns (this copies the data)
ix_vals = list(map(np.array, zip(*self.index.values)))
else:
ix_vals = [self.index.values]
arrays = ix_vals + [self[c]._internal_get_values() for c in self.columns]
count = 0
index_names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i, n in enumerate(index_names):
if n is None:
index_names[i] = f"level_{count}"
count += 1
elif index_names[0] is None:
index_names = ["index"]
names = [str(name) for name in itertools.chain(index_names, self.columns)]
else:
arrays = [self[c]._internal_get_values() for c in self.columns]
names = [str(c) for c in self.columns]
index_names = []
index_len = len(index_names)
formats = []
for i, v in enumerate(arrays):
index = i
# When the names and arrays are collected, we
# first collect those in the DataFrame's index,
# followed by those in its columns.
#
# Thus, the total length of the array is:
# len(index_names) + len(DataFrame.columns).
#
# This check allows us to see whether we are
# handling a name / array in the index or column.
if index < index_len:
dtype_mapping = index_dtypes
name = index_names[index]
else:
index -= index_len
dtype_mapping = column_dtypes
name = self.columns[index]
# We have a dictionary, so we get the data type
# associated with the index or column (which can
# be denoted by its name in the DataFrame or its
# position in DataFrame's array of indices or
# columns, whichever is applicable.
if is_dict_like(dtype_mapping):
if name in dtype_mapping:
dtype_mapping = dtype_mapping[name]
elif index in dtype_mapping:
dtype_mapping = dtype_mapping[index]
else:
dtype_mapping = None
# If no mapping can be found, use the array's
# dtype attribute for formatting.
#
# A valid dtype must either be a type or
# string naming a type.
if dtype_mapping is None:
formats.append(v.dtype)
elif isinstance(dtype_mapping, (type, np.dtype, str)):
formats.append(dtype_mapping)
else:
element = "row" if i < index_len else "column"
msg = f"Invalid dtype {dtype_mapping} specified for {element} {name}"
raise ValueError(msg)
return np.rec.fromarrays(arrays, dtype={"names": names, "formats": formats})
@classmethod
def _from_arrays(cls, arrays, columns, index, dtype=None) -> "DataFrame":
mgr = arrays_to_mgr(arrays, columns, index, columns, dtype=dtype)
return cls(mgr)
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_stata(
self,
path,
convert_dates=None,
write_index=True,
byteorder=None,
time_stamp=None,
data_label=None,
variable_labels=None,
version=114,
convert_strl=None,
):
"""
Export DataFrame object to Stata dta format.
Writes the DataFrame to a Stata dataset file.
"dta" files contain a Stata dataset.
Parameters
----------
path : str, buffer or path object
String, path object (pathlib.Path or py._path.local.LocalPath) or
object implementing a binary write() function. If using a buffer
then the buffer will not be automatically closed after the file
data has been written.
.. versionchanged:: 1.0.0
Previously this was "fname"
convert_dates : dict
Dictionary mapping columns containing datetime types to stata
internal format to use when writing the dates. Options are 'tc',
'td', 'tm', 'tw', 'th', 'tq', 'ty'. Column can be either an integer
or a name. Datetime columns that do not have a conversion type
specified will be converted to 'tc'. Raises NotImplementedError if
a datetime column has timezone information.
write_index : bool
Write the index to Stata dataset.
byteorder : str
            Can be ">", "<", "little", or "big". Default is `sys.byteorder`.
time_stamp : datetime
A datetime to use as file creation date. Default is the current
time.
data_label : str, optional
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as
values. Each label must be 80 characters or smaller.
version : {114, 117, 118, 119, None}, default 114
Version to use in the output dta file. Set to None to let pandas
decide between 118 or 119 formats depending on the number of
columns in the frame. Version 114 can be read by Stata 10 and
later. Version 117 can be read by Stata 13 or later. Version 118
is supported in Stata 14 and later. Version 119 is supported in
Stata 15 and later. Version 114 limits string variables to 244
characters or fewer while versions 117 and later allow strings
with lengths up to 2,000,000 characters. Versions 118 and 119
support Unicode characters, and version 119 supports more than
32,767 variables.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Added support for formats 118 and 119.
convert_strl : list, optional
List of column names to convert to string columns to Stata StrL
format. Only available if version is 117. Storing strings in the
StrL format can produce smaller dta files if strings have more than
8 characters and values are repeated.
.. versionadded:: 0.23.0
Raises
------
NotImplementedError
* If datetimes contain timezone information
* Column dtype is not representable in Stata
ValueError
            * Columns listed in convert_dates are neither datetime64[ns]
              nor datetime.datetime
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
See Also
--------
read_stata : Import Stata data files.
io.stata.StataWriter : Low-level writer for Stata data files.
io.stata.StataWriter117 : Low-level writer for version 117 files.
Examples
--------
>>> df = pd.DataFrame({'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]})
>>> df.to_stata('animals.dta') # doctest: +SKIP
"""
if version not in (114, 117, 118, 119, None):
raise ValueError("Only formats 114, 117, 118 and 119 are supported.")
if version == 114:
if convert_strl is not None:
raise ValueError("strl is not supported in format 114")
from pandas.io.stata import StataWriter as statawriter
elif version == 117:
from pandas.io.stata import StataWriter117 as statawriter
else: # versions 118 and 119
from pandas.io.stata import StataWriterUTF8 as statawriter
kwargs = {}
if version is None or version >= 117:
# strl conversion is only supported >= 117
kwargs["convert_strl"] = convert_strl
if version is None or version >= 118:
# Specifying the version is only supported for UTF8 (118 or 119)
kwargs["version"] = version
writer = statawriter(
path,
self,
convert_dates=convert_dates,
byteorder=byteorder,
time_stamp=time_stamp,
data_label=data_label,
write_index=write_index,
variable_labels=variable_labels,
**kwargs,
)
writer.write_file()
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_feather(self, path) -> None:
"""
Write out the binary feather-format for DataFrames.
Parameters
----------
path : str
String file path.
"""
from pandas.io.feather_format import to_feather
to_feather(self, path)
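    # Usage sketch (illustrative): round-tripping a frame through the feather
    # format. Requires the optional ``pyarrow`` dependency; the file name is a
    # hypothetical placeholder.
    # >>> df = pd.DataFrame({"a": [1, 2], "b": ["x", "y"]})
    # >>> df.to_feather("df.feather")    # doctest: +SKIP
    # >>> pd.read_feather("df.feather")  # doctest: +SKIP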
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame(
... data={"animal_1": ["elk", "pig"], "animal_2": ["dog", "quetzal"]}
... )
>>> print(df.to_markdown())
| | animal_1 | animal_2 |
|---:|:-----------|:-----------|
| 0 | elk | dog |
| 1 | pig | quetzal |
"""
)
@Substitution(klass="DataFrame")
@Appender(_shared_docs["to_markdown"])
def to_markdown(
self, buf: Optional[IO[str]] = None, mode: Optional[str] = None, **kwargs
) -> Optional[str]:
kwargs.setdefault("headers", "keys")
kwargs.setdefault("tablefmt", "pipe")
tabulate = import_optional_dependency("tabulate")
result = tabulate.tabulate(self, **kwargs)
if buf is None:
return result
buf, _, _, _ = get_filepath_or_buffer(buf, mode=mode)
assert buf is not None # Help mypy.
buf.writelines(result)
return None
@deprecate_kwarg(old_arg_name="fname", new_arg_name="path")
def to_parquet(
self,
path,
engine="auto",
compression="snappy",
index=None,
partition_cols=None,
**kwargs,
) -> None:
"""
Write a DataFrame to the binary parquet format.
.. versionadded:: 0.21.0
This function writes the dataframe as a `parquet file
<https://parquet.apache.org/>`_. You can choose different parquet
backends, and have the option of compression. See
:ref:`the user guide <io.parquet>` for more details.
Parameters
----------
path : str
File path or Root Directory path. Will be used as Root Directory
path while writing a partitioned dataset.
.. versionchanged:: 1.0.0
Previously this was "fname"
engine : {'auto', 'pyarrow', 'fastparquet'}, default 'auto'
Parquet library to use. If 'auto', then the option
``io.parquet.engine`` is used. The default ``io.parquet.engine``
behavior is to try 'pyarrow', falling back to 'fastparquet' if
'pyarrow' is unavailable.
compression : {'snappy', 'gzip', 'brotli', None}, default 'snappy'
Name of the compression to use. Use ``None`` for no compression.
index : bool, default None
If ``True``, include the dataframe's index(es) in the file output.
If ``False``, they will not be written to the file.
If ``None``, similar to ``True`` the dataframe's index(es)
will be saved. However, instead of being saved as values,
the RangeIndex will be stored as a range in the metadata so it
doesn't require much space and is faster. Other indexes will
be included as columns in the file output.
.. versionadded:: 0.24.0
partition_cols : list, optional, default None
Column names by which to partition the dataset.
Columns are partitioned in the order they are given.
.. versionadded:: 0.24.0
**kwargs
Additional arguments passed to the parquet library. See
:ref:`pandas io <io.parquet>` for more details.
See Also
--------
read_parquet : Read a parquet file.
DataFrame.to_csv : Write a csv file.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_hdf : Write to hdf.
Notes
-----
This function requires either the `fastparquet
<https://pypi.org/project/fastparquet>`_ or `pyarrow
<https://arrow.apache.org/docs/python/>`_ library.
Examples
--------
>>> df = pd.DataFrame(data={'col1': [1, 2], 'col2': [3, 4]})
>>> df.to_parquet('df.parquet.gzip',
... compression='gzip') # doctest: +SKIP
>>> pd.read_parquet('df.parquet.gzip') # doctest: +SKIP
col1 col2
0 1 3
1 2 4
"""
from pandas.io.parquet import to_parquet
to_parquet(
self,
path,
engine,
compression=compression,
index=index,
partition_cols=partition_cols,
**kwargs,
)
@Substitution(
header_type="bool",
header="Whether to print column labels, default True",
col_space_type="str or int",
col_space="The minimum width of each column in CSS length "
"units. An int is assumed to be px units.\n\n"
" .. versionadded:: 0.25.0\n"
" Ability to use str",
)
@Substitution(shared_params=fmt.common_docstring, returns=fmt.return_docstring)
def to_html(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
justify=None,
max_rows=None,
max_cols=None,
show_dimensions=False,
decimal=".",
bold_rows=True,
classes=None,
escape=True,
notebook=False,
border=None,
table_id=None,
render_links=False,
encoding=None,
):
"""
Render a DataFrame as an HTML table.
%(shared_params)s
bold_rows : bool, default True
Make the row labels bold in the output.
classes : str or list or tuple, default None
CSS class(es) to apply to the resulting html table.
escape : bool, default True
Convert the characters <, >, and & to HTML-safe sequences.
notebook : {True, False}, default False
Whether the generated HTML is for IPython Notebook.
border : int
A ``border=border`` attribute is included in the opening
`<table>` tag. Default ``pd.options.display.html.border``.
encoding : str, default "utf-8"
Set character encoding.
.. versionadded:: 1.0
table_id : str, optional
A css id is included in the opening `<table>` tag if specified.
.. versionadded:: 0.23.0
render_links : bool, default False
Convert URLs to HTML links.
.. versionadded:: 0.24.0
%(returns)s
See Also
--------
to_string : Convert DataFrame to a string.
"""
if justify is not None and justify not in fmt._VALID_JUSTIFY_PARAMETERS:
raise ValueError("Invalid value for justify parameter")
formatter = fmt.DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
formatters=formatters,
float_format=float_format,
sparsify=sparsify,
justify=justify,
index_names=index_names,
header=header,
index=index,
bold_rows=bold_rows,
escape=escape,
max_rows=max_rows,
max_cols=max_cols,
show_dimensions=show_dimensions,
decimal=decimal,
table_id=table_id,
render_links=render_links,
)
        # TODO: a generic formatter would be in DataFrameFormatter
return formatter.to_html(
buf=buf,
classes=classes,
notebook=notebook,
border=border,
encoding=encoding,
)
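    # Usage sketch (illustrative): rendering a small frame to an HTML table
    # with a CSS class and without escaping cell contents; the frame and the
    # class names are hypothetical.
    # >>> df = pd.DataFrame({"name": ["<b>a</b>"], "value": [1]})
    # >>> html = df.to_html(classes="table table-striped", escape=False,
    # ...                   index=False, border=0)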
# ----------------------------------------------------------------------
def info(
self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None
) -> None:
"""
Print a concise summary of a DataFrame.
This method prints information about a DataFrame including
the index dtype and column dtypes, non-null values and memory usage.
Parameters
----------
verbose : bool, optional
Whether to print the full summary. By default, the setting in
``pandas.options.display.max_info_columns`` is followed.
buf : writable buffer, defaults to sys.stdout
Where to send the output. By default, the output is printed to
sys.stdout. Pass a writable buffer if you need to further process
the output.
max_cols : int, optional
When to switch from the verbose to the truncated output. If the
DataFrame has more than `max_cols` columns, the truncated output
is used. By default, the setting in
``pandas.options.display.max_info_columns`` is used.
memory_usage : bool, str, optional
Specifies whether total memory usage of the DataFrame
elements (including the index) should be displayed. By default,
this follows the ``pandas.options.display.memory_usage`` setting.
True always show memory usage. False never shows memory usage.
A value of 'deep' is equivalent to "True with deep introspection".
Memory usage is shown in human-readable units (base-2
representation). Without deep introspection a memory estimation is
made based in column dtype and number of rows assuming values
consume the same memory amount for corresponding dtypes. With deep
memory introspection, a real memory usage calculation is performed
at the cost of computational resources.
null_counts : bool, optional
Whether to show the non-null counts. By default, this is shown
only if the frame is smaller than
``pandas.options.display.max_info_rows`` and
``pandas.options.display.max_info_columns``. A value of True always
shows the counts, and False never shows the counts.
Returns
-------
None
This method prints a summary of a DataFrame and returns None.
See Also
--------
DataFrame.describe: Generate descriptive statistics of DataFrame
columns.
DataFrame.memory_usage: Memory usage of DataFrame columns.
Examples
--------
>>> int_values = [1, 2, 3, 4, 5]
>>> text_values = ['alpha', 'beta', 'gamma', 'delta', 'epsilon']
>>> float_values = [0.0, 0.25, 0.5, 0.75, 1.0]
>>> df = pd.DataFrame({"int_col": int_values, "text_col": text_values,
... "float_col": float_values})
>>> df
int_col text_col float_col
0 1 alpha 0.00
1 2 beta 0.25
2 3 gamma 0.50
3 4 delta 0.75
4 5 epsilon 1.00
Prints information of all columns:
>>> df.info(verbose=True)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 int_col 5 non-null int64
1 text_col 5 non-null object
2 float_col 5 non-null float64
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Prints a summary of columns count and its dtypes but not per column
information:
>>> df.info(verbose=False)
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 5 entries, 0 to 4
Columns: 3 entries, int_col to float_col
dtypes: float64(1), int64(1), object(1)
memory usage: 248.0+ bytes
Pipe output of DataFrame.info to buffer instead of sys.stdout, get
buffer content and writes to a text file:
>>> import io
>>> buffer = io.StringIO()
>>> df.info(buf=buffer)
>>> s = buffer.getvalue()
>>> with open("df_info.txt", "w",
... encoding="utf-8") as f: # doctest: +SKIP
... f.write(s)
260
The `memory_usage` parameter allows deep introspection mode, specially
useful for big DataFrames and fine-tune memory optimization:
>>> random_strings_array = np.random.choice(['a', 'b', 'c'], 10 ** 6)
>>> df = pd.DataFrame({
... 'column_1': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_2': np.random.choice(['a', 'b', 'c'], 10 ** 6),
... 'column_3': np.random.choice(['a', 'b', 'c'], 10 ** 6)
... })
>>> df.info()
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 22.9+ MB
>>> df.info(memory_usage='deep')
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000000 entries, 0 to 999999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 column_1 1000000 non-null object
1 column_2 1000000 non-null object
2 column_3 1000000 non-null object
dtypes: object(3)
memory usage: 188.8 MB
"""
if buf is None: # pragma: no cover
buf = sys.stdout
lines = []
lines.append(str(type(self)))
lines.append(self.index._summary())
if len(self.columns) == 0:
lines.append(f"Empty {type(self).__name__}")
fmt.buffer_put_lines(buf, lines)
return
cols = self.columns
col_count = len(self.columns)
        # hack: fall back to the display-option defaults when limits are not given
if max_cols is None:
max_cols = get_option("display.max_info_columns", len(self.columns) + 1)
max_rows = get_option("display.max_info_rows", len(self) + 1)
if null_counts is None:
show_counts = (col_count <= max_cols) and (len(self) < max_rows)
else:
show_counts = null_counts
exceeds_info_cols = col_count > max_cols
def _verbose_repr():
lines.append(f"Data columns (total {len(self.columns)} columns):")
id_head = " # "
column_head = "Column"
col_space = 2
max_col = max(len(pprint_thing(k)) for k in cols)
len_column = len(pprint_thing(column_head))
space = max(max_col, len_column) + col_space
max_id = len(pprint_thing(col_count))
len_id = len(pprint_thing(id_head))
space_num = max(max_id, len_id) + col_space
counts = None
header = _put_str(id_head, space_num) + _put_str(column_head, space)
if show_counts:
counts = self.count()
if len(cols) != len(counts): # pragma: no cover
raise AssertionError(
f"Columns must equal counts ({len(cols)} != {len(counts)})"
)
count_header = "Non-Null Count"
len_count = len(count_header)
non_null = " non-null"
max_count = max(len(pprint_thing(k)) for k in counts) + len(non_null)
space_count = max(len_count, max_count) + col_space
count_temp = "{count}" + non_null
else:
count_header = ""
space_count = len(count_header)
len_count = space_count
count_temp = "{count}"
dtype_header = "Dtype"
len_dtype = len(dtype_header)
max_dtypes = max(len(pprint_thing(k)) for k in self.dtypes)
space_dtype = max(len_dtype, max_dtypes)
header += _put_str(count_header, space_count) + _put_str(
dtype_header, space_dtype
)
lines.append(header)
lines.append(
_put_str("-" * len_id, space_num)
+ _put_str("-" * len_column, space)
+ _put_str("-" * len_count, space_count)
+ _put_str("-" * len_dtype, space_dtype)
)
for i, col in enumerate(self.columns):
dtype = self.dtypes.iloc[i]
col = pprint_thing(col)
line_no = _put_str(f" {i}", space_num)
count = ""
if show_counts:
count = counts.iloc[i]
lines.append(
line_no
+ _put_str(col, space)
+ _put_str(count_temp.format(count=count), space_count)
+ _put_str(dtype, space_dtype)
)
def _non_verbose_repr():
lines.append(self.columns._summary(name="Columns"))
def _sizeof_fmt(num, size_qualifier):
# returns size in human readable format
for x in ["bytes", "KB", "MB", "GB", "TB"]:
if num < 1024.0:
return f"{num:3.1f}{size_qualifier} {x}"
num /= 1024.0
return f"{num:3.1f}{size_qualifier} PB"
if verbose:
_verbose_repr()
        elif verbose is False:  # specifically set to False, as opposed to None
_non_verbose_repr()
else:
if exceeds_info_cols:
_non_verbose_repr()
else:
_verbose_repr()
counts = self._data.get_dtype_counts()
dtypes = [f"{k[0]}({k[1]:d})" for k in sorted(counts.items())]
lines.append(f"dtypes: {', '.join(dtypes)}")
if memory_usage is None:
memory_usage = get_option("display.memory_usage")
if memory_usage:
# append memory usage of df to display
size_qualifier = ""
if memory_usage == "deep":
deep = True
else:
# size_qualifier is just a best effort; not guaranteed to catch
# all cases (e.g., it misses categorical data even with object
# categories)
deep = False
if "object" in counts or self.index._is_memory_usage_qualified():
size_qualifier = "+"
mem_usage = self.memory_usage(index=True, deep=deep).sum()
lines.append(f"memory usage: {_sizeof_fmt(mem_usage, size_qualifier)}\n")
fmt.buffer_put_lines(buf, lines)
def memory_usage(self, index=True, deep=False) -> Series:
"""
Return the memory usage of each column in bytes.
The memory usage can optionally include the contribution of
the index and elements of `object` dtype.
This value is displayed in `DataFrame.info` by default. This can be
suppressed by setting ``pandas.options.display.memory_usage`` to False.
Parameters
----------
index : bool, default True
Specifies whether to include the memory usage of the DataFrame's
index in returned Series. If ``index=True``, the memory usage of
the index is the first item in the output.
deep : bool, default False
If True, introspect the data deeply by interrogating
`object` dtypes for system-level memory consumption, and include
it in the returned values.
Returns
-------
Series
A Series whose index is the original column names and whose values
is the memory usage of each column in bytes.
See Also
--------
numpy.ndarray.nbytes : Total bytes consumed by the elements of an
ndarray.
Series.memory_usage : Bytes consumed by a Series.
Categorical : Memory-efficient array for string values with
many repeated values.
DataFrame.info : Concise summary of a DataFrame.
Examples
--------
>>> dtypes = ['int64', 'float64', 'complex128', 'object', 'bool']
>>> data = dict([(t, np.ones(shape=5000).astype(t))
... for t in dtypes])
>>> df = pd.DataFrame(data)
>>> df.head()
int64 float64 complex128 object bool
0 1 1.0 1.000000+0.000000j 1 True
1 1 1.0 1.000000+0.000000j 1 True
2 1 1.0 1.000000+0.000000j 1 True
3 1 1.0 1.000000+0.000000j 1 True
4 1 1.0 1.000000+0.000000j 1 True
>>> df.memory_usage()
Index 128
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
>>> df.memory_usage(index=False)
int64 40000
float64 40000
complex128 80000
object 40000
bool 5000
dtype: int64
The memory footprint of `object` dtype columns is ignored by default:
>>> df.memory_usage(deep=True)
Index 128
int64 40000
float64 40000
complex128 80000
object 160000
bool 5000
dtype: int64
Use a Categorical for efficient storage of an object-dtype column with
many repeated values.
>>> df['object'].astype('category').memory_usage(deep=True)
5216
"""
result = Series(
[c.memory_usage(index=False, deep=deep) for col, c in self.items()],
index=self.columns,
)
if index:
result = Series(self.index.memory_usage(deep=deep), index=["Index"]).append(
result
)
return result
def transpose(self, *args, copy: bool = False) -> "DataFrame":
"""
Transpose index and columns.
Reflect the DataFrame over its main diagonal by writing rows as columns
and vice-versa. The property :attr:`.T` is an accessor to the method
:meth:`transpose`.
Parameters
----------
*args : tuple, optional
Accepted for compatibility with NumPy.
copy : bool, default False
Whether to copy the data after transposing, even for DataFrames
with a single dtype.
Note that a copy is always required for mixed dtype DataFrames,
or for DataFrames with any extension types.
Returns
-------
DataFrame
The transposed DataFrame.
See Also
--------
numpy.transpose : Permute the dimensions of a given array.
Notes
-----
Transposing a DataFrame with mixed dtypes will result in a homogeneous
DataFrame with the `object` dtype. In such a case, a copy of the data
is always made.
Examples
--------
**Square DataFrame with homogeneous dtype**
>>> d1 = {'col1': [1, 2], 'col2': [3, 4]}
>>> df1 = pd.DataFrame(data=d1)
>>> df1
col1 col2
0 1 3
1 2 4
>>> df1_transposed = df1.T # or df1.transpose()
>>> df1_transposed
0 1
col1 1 2
col2 3 4
When the dtype is homogeneous in the original DataFrame, we get a
transposed DataFrame with the same dtype:
>>> df1.dtypes
col1 int64
col2 int64
dtype: object
>>> df1_transposed.dtypes
0 int64
1 int64
dtype: object
**Non-square DataFrame with mixed dtypes**
>>> d2 = {'name': ['Alice', 'Bob'],
... 'score': [9.5, 8],
... 'employed': [False, True],
... 'kids': [0, 0]}
>>> df2 = pd.DataFrame(data=d2)
>>> df2
name score employed kids
0 Alice 9.5 False 0
1 Bob 8.0 True 0
>>> df2_transposed = df2.T # or df2.transpose()
>>> df2_transposed
0 1
name Alice Bob
score 9.5 8
employed False True
kids 0 0
When the DataFrame has mixed dtypes, we get a transposed DataFrame with
the `object` dtype:
>>> df2.dtypes
name object
score float64
employed bool
kids int64
dtype: object
>>> df2_transposed.dtypes
0 object
1 object
dtype: object
"""
nv.validate_transpose(args, dict())
# construct the args
dtypes = list(self.dtypes)
if self._is_homogeneous_type and dtypes and is_extension_array_dtype(dtypes[0]):
# We have EAs with the same dtype. We can preserve that dtype in transpose.
dtype = dtypes[0]
arr_type = dtype.construct_array_type()
values = self.values
new_values = [arr_type._from_sequence(row, dtype=dtype) for row in values]
result = self._constructor(
dict(zip(self.index, new_values)), index=self.columns
)
else:
new_values = self.values.T
if copy:
new_values = new_values.copy()
result = self._constructor(
new_values, index=self.columns, columns=self.index
)
return result.__finalize__(self)
T = property(transpose)
# ----------------------------------------------------------------------
# Indexing Methods
def _ixs(self, i: int, axis: int = 0):
"""
Parameters
----------
i : int
axis : int
Notes
-----
If slice passed, the resulting data will be a view.
"""
# irow
if axis == 0:
new_values = self._data.fast_xs(i)
# if we are a copy, mark as such
copy = isinstance(new_values, np.ndarray) and new_values.base is None
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[i],
dtype=new_values.dtype,
)
result._set_is_copy(self, copy=copy)
return result
# icol
else:
label = self.columns[i]
values = self._data.iget(i)
result = self._box_col_values(values, label)
# this is a cached value, mark it so
result._set_as_cached(label, self)
return result
def __getitem__(self, key):
key = lib.item_from_zerodim(key)
key = com.apply_if_callable(key, self)
if is_hashable(key):
# shortcut if the key is in columns
if self.columns.is_unique and key in self.columns:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
return self._get_item_cache(key)
# Do we have a slicer (on rows)?
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._slice(indexer, axis=0)
# Do we have a (boolean) DataFrame?
if isinstance(key, DataFrame):
return self.where(key)
# Do we have a (boolean) 1d indexer?
if com.is_bool_indexer(key):
return self._getitem_bool_array(key)
# We are left with two options: a single key, and a collection of keys,
# We interpret tuples as collections only for non-MultiIndex
is_single_key = isinstance(key, tuple) or not is_list_like(key)
if is_single_key:
if self.columns.nlevels > 1:
return self._getitem_multilevel(key)
indexer = self.columns.get_loc(key)
if is_integer(indexer):
indexer = [indexer]
else:
if is_iterator(key):
key = list(key)
indexer = self.loc._get_listlike_indexer(key, axis=1, raise_missing=True)[1]
# take() does not accept boolean indexers
if getattr(indexer, "dtype", None) == bool:
indexer = np.where(indexer)[0]
data = self.take(indexer, axis=1)
if is_single_key:
# What does looking for a single key in a non-unique index return?
# The behavior is inconsistent. It returns a Series, except when
# - the key itself is repeated (test on data.shape, #9519), or
# - we have a MultiIndex on columns (test on self.columns, #21309)
if data.shape[1] == 1 and not isinstance(self.columns, ABCMultiIndex):
data = data[key]
return data
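    # Dispatch sketch (illustrative) for the branches above, using a
    # hypothetical frame ``df`` with columns "a" and "b":
    # >>> df["a"]          # hashable key -> single column via the item cache
    # >>> df[["a", "b"]]   # list-like key -> column subset via take()
    # >>> df[df["a"] > 0]  # boolean indexer -> row mask via _getitem_bool_array
    # >>> df[1:3]          # slice -> row slice via convert_to_index_sliceable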
def _getitem_bool_array(self, key):
# also raises Exception if object array with NA values
# warning here just in case -- previously __setitem__ was
# reindexing but __getitem__ was not; it seems more reasonable to
# go with the __setitem__ behavior since that is more consistent
# with all other indexing behavior
if isinstance(key, Series) and not key.index.equals(self.index):
warnings.warn(
"Boolean Series key will be reindexed to match DataFrame index.",
UserWarning,
stacklevel=3,
)
elif len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}."
)
# check_bool_indexer will throw exception if Series key cannot
# be reindexed to match DataFrame rows
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
return self.take(indexer, axis=0)
def _getitem_multilevel(self, key):
# self.columns is a MultiIndex
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
new_columns = self.columns[loc]
result_columns = maybe_droplevels(new_columns, key)
if self._is_mixed_type:
result = self.reindex(columns=new_columns)
result.columns = result_columns
else:
new_values = self.values[:, loc]
result = self._constructor(
new_values, index=self.index, columns=result_columns
)
result = result.__finalize__(self)
# If there is only one column being returned, and its name is
# either an empty string, or a tuple with an empty string as its
# first element, then treat the empty string as a placeholder
# and return the column as if the user had provided that empty
# string in the key. If the result is a Series, exclude the
# implied empty string from its name.
if len(result.columns) == 1:
top = result.columns[0]
if isinstance(top, tuple):
top = top[0]
if top == "":
result = result[""]
if isinstance(result, Series):
result = self._constructor_sliced(
result, index=self.index, name=key
)
result._set_is_copy(self)
return result
else:
return self._get_item_cache(key)
def _get_value(self, index, col, takeable: bool = False):
"""
Quickly retrieve single value at passed column and index.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
scalar
"""
if takeable:
series = self._iget_item_cache(col)
return com.maybe_box_datetimelike(series._values[index])
series = self._get_item_cache(col)
engine = self.index._engine
try:
return engine.get_value(series._values, index)
except KeyError:
# GH 20629
if self.index.nlevels > 1:
# partial indexing forbidden
raise
except (TypeError, ValueError):
pass
# we cannot handle direct indexing
# use positional
col = self.columns.get_loc(col)
index = self.index.get_loc(index)
return self._get_value(index, col, takeable=True)
def __setitem__(self, key, value):
key = com.apply_if_callable(key, self)
# see if we can slice the rows
indexer = convert_to_index_sliceable(self, key)
if indexer is not None:
# either we have a slice or we have a string that can be converted
# to a slice for partial-string date indexing
return self._setitem_slice(indexer, value)
if isinstance(key, DataFrame) or getattr(key, "ndim", None) == 2:
self._setitem_frame(key, value)
elif isinstance(key, (Series, np.ndarray, list, Index)):
self._setitem_array(key, value)
else:
# set column
self._set_item(key, value)
def _setitem_slice(self, key, value):
self._check_setitem_copy()
self.loc[key] = value
def _setitem_array(self, key, value):
# also raises Exception if object array with NA values
if com.is_bool_indexer(key):
if len(key) != len(self.index):
raise ValueError(
f"Item wrong length {len(key)} instead of {len(self.index)}!"
)
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
self._check_setitem_copy()
self.loc._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
if len(value.columns) != len(key):
raise ValueError("Columns must be same length as key")
for k1, k2 in zip(key, value.columns):
self[k1] = value[k2]
else:
indexer = self.loc._get_listlike_indexer(
key, axis=1, raise_missing=False
)[1]
self._check_setitem_copy()
self.loc._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
# support boolean setting with DataFrame input, e.g.
# df[df > df2] = 0
if isinstance(key, np.ndarray):
if key.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
key = self._constructor(key, **self._construct_axes_dict())
if key.values.size and not is_bool_dtype(key.values):
raise TypeError(
"Must pass DataFrame or 2-d ndarray with boolean values only"
)
self._check_inplace_setting(value)
self._check_setitem_copy()
self._where(-key, value, inplace=True)
def _set_item(self, key, value):
"""
Add series to DataFrame in specified column.
If the value is a numpy array (not a Series), it must be the
same length as the DataFrame's index or an error will be thrown.
A Series will be conformed to the DataFrame's index to
ensure homogeneity.
"""
self._ensure_valid_index(value)
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
# check if we are modifying a copy
# try to set first as we want an invalid
# value exception to occur first
if len(self):
self._check_setitem_copy()
def _set_value(self, index, col, value, takeable: bool = False):
"""
Put single value at passed column and index.
Parameters
----------
index : row label
col : column label
value : scalar
takeable : interpret the index/col as indexers, default False
Returns
-------
DataFrame
If label pair is contained, will be reference to calling DataFrame,
otherwise a new object.
"""
try:
if takeable is True:
series = self._iget_item_cache(col)
return series._set_value(index, value, takeable=True)
series = self._get_item_cache(col)
engine = self.index._engine
engine.set_value(series._values, index, value)
return self
except (KeyError, TypeError):
# set using a non-recursive method & reset the cache
if takeable:
self.iloc[index, col] = value
else:
self.loc[index, col] = value
self._item_cache.pop(col, None)
return self
def _ensure_valid_index(self, value):
"""
Ensure that if we don't have an index, that we can create one from the
passed value.
"""
# GH5632, make sure that we are a Series convertible
if not len(self.index) and is_list_like(value) and len(value):
try:
value = Series(value)
except (ValueError, NotImplementedError, TypeError):
raise ValueError(
"Cannot set a frame with no defined index "
"and a value that cannot be converted to a "
"Series"
)
self._data = self._data.reindex_axis(
value.index.copy(), axis=1, fill_value=np.nan
)
def _box_item_values(self, key, values):
items = self.columns[self.columns.get_loc(key)]
if values.ndim == 2:
return self._constructor(values.T, columns=items, index=self.index)
else:
return self._box_col_values(values, items)
def _box_col_values(self, values, items):
"""
Provide boxed values for a column.
"""
klass = self._constructor_sliced
return klass(values, index=self.index, name=items, fastpath=True)
# ----------------------------------------------------------------------
# Unsorted
def query(self, expr, inplace=False, **kwargs):
"""
Query the columns of a DataFrame with a boolean expression.
Parameters
----------
expr : str
The query string to evaluate.
You can refer to variables
in the environment by prefixing them with an '@' character like
``@a + b``.
You can refer to column names that contain spaces or operators by
surrounding them in backticks. This way you can also escape
names that start with a digit, or those that are a Python keyword.
In short, use backticks whenever the name is not a valid Python
identifier; see the Notes section below for more details.
For example, if one of your columns is called ``a a`` and you want
to sum it with ``b``, your query should be ```a a` + b``.
.. versionadded:: 0.25.0
Backtick quoting introduced.
.. versionadded:: 1.0.0
Expanding functionality of backtick quoting for more than only spaces.
inplace : bool
Whether the query should modify the data in place or return
a modified copy.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by :meth:`DataFrame.query`.
Returns
-------
DataFrame
DataFrame resulting from the provided query expression.
See Also
--------
eval : Evaluate a string describing operations on
DataFrame columns.
DataFrame.eval : Evaluate a string describing operations on
DataFrame columns.
Notes
-----
The result of the evaluation of this expression is first passed to
:attr:`DataFrame.loc` and if that fails because of a
multidimensional key (e.g., a DataFrame) then the result will be passed
to :meth:`DataFrame.__getitem__`.
This method uses the top-level :func:`eval` function to
evaluate the passed query.
The :meth:`~pandas.DataFrame.query` method uses a slightly
modified Python syntax by default. For example, the ``&`` and ``|``
(bitwise) operators have the precedence of their boolean cousins,
:keyword:`and` and :keyword:`or`. This *is* syntactically valid Python,
however the semantics are different.
You can change the semantics of the expression by passing the keyword
argument ``parser='python'``. This enforces the same semantics as
evaluation in Python space. Likewise, you can pass ``engine='python'``
to evaluate an expression using Python itself as a backend. This is not
recommended as it is inefficient compared to using ``numexpr`` as the
engine.
The :attr:`DataFrame.index` and
:attr:`DataFrame.columns` attributes of the
:class:`~pandas.DataFrame` instance are placed in the query namespace
by default, which allows you to treat both the index and columns of the
frame as a column in the frame.
The identifier ``index`` is used for the frame index; you can also
use the name of the index to identify it in a query. Please note that
Python keywords may not be used as identifiers.
For further details and examples see the ``query`` documentation in
:ref:`indexing <indexing.query>`.
*Backtick quoted variables*
Backtick quoted variables are parsed as literal Python code and
are converted internally to a valid Python identifier.
This can lead to the following problems.
During parsing a number of disallowed characters inside the backtick
quoted string are replaced by strings that are allowed as a Python identifier.
These characters include all operators in Python, the space character, the
question mark, the exclamation mark, the dollar sign, and the euro sign.
For other characters that fall outside the ASCII range (U+0001..U+007F)
and those that are not further specified in PEP 3131,
the query parser will raise an error.
Also excluded are any whitespace characters other than the space,
the hash character (as it is used for comments), and the backtick
itself (the backtick cannot be escaped).
In a special case, quotes that make a pair around a backtick can
confuse the parser.
For example, ```it's` > `that's``` will raise an error,
as it forms a quoted string (``'s > `that'``) with a backtick inside.
See also the Python documentation about lexical analysis
(https://docs.python.org/3/reference/lexical_analysis.html)
in combination with the source code in :mod:`pandas.core.computation.parsing`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6),
... 'B': range(10, 0, -2),
... 'C C': range(10, 5, -1)})
>>> df
A B C C
0 1 10 10
1 2 8 9
2 3 6 8
3 4 4 7
4 5 2 6
>>> df.query('A > B')
A B C C
4 5 2 6
The previous expression is equivalent to
>>> df[df.A > df.B]
A B C C
4 5 2 6
For columns with spaces in their name, you can use backtick quoting.
>>> df.query('B == `C C`')
A B C C
0 1 10 10
The previous expression is equivalent to
>>> df[df.B == df['C C']]
A B C C
0 1 10 10
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(expr, str):
msg = f"expr must be a string to be evaluated, {type(expr)} given"
raise ValueError(msg)
kwargs["level"] = kwargs.pop("level", 0) + 1
kwargs["target"] = None
res = self.eval(expr, **kwargs)
try:
new_data = self.loc[res]
except ValueError:
# when res is multi-dimensional loc raises, but this is sometimes a
# valid query
new_data = self[res]
if inplace:
self._update_inplace(new_data)
else:
return new_data
def eval(self, expr, inplace=False, **kwargs):
"""
Evaluate a string describing operations on DataFrame columns.
Operates on columns only, not specific rows or elements. This allows
`eval` to run arbitrary code, which can make you vulnerable to code
injection if you pass user input to this function.
Parameters
----------
expr : str
The expression string to evaluate.
inplace : bool, default False
If the expression contains an assignment, whether to perform the
operation inplace and mutate the existing DataFrame. Otherwise,
a new DataFrame is returned.
**kwargs
See the documentation for :func:`eval` for complete details
on the keyword arguments accepted by
:meth:`~pandas.DataFrame.query`.
Returns
-------
ndarray, scalar, or pandas object
The result of the evaluation.
See Also
--------
DataFrame.query : Evaluates a boolean expression to query the columns
of a frame.
DataFrame.assign : Can evaluate an expression or function to create new
values for a column.
eval : Evaluate a Python expression as a string using various
backends.
Notes
-----
For more details see the API documentation for :func:`~eval`.
For detailed examples see :ref:`enhancing performance with eval
<enhancingperf.eval>`.
Examples
--------
>>> df = pd.DataFrame({'A': range(1, 6), 'B': range(10, 0, -2)})
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
>>> df.eval('A + B')
0 11
1 10
2 9
3 8
4 7
dtype: int64
Assignment is allowed though by default the original DataFrame is not
modified.
>>> df.eval('C = A + B')
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
>>> df
A B
0 1 10
1 2 8
2 3 6
3 4 4
4 5 2
Use ``inplace=True`` to modify the original DataFrame.
>>> df.eval('C = A + B', inplace=True)
>>> df
A B C
0 1 10 11
1 2 8 10
2 3 6 9
3 4 4 8
4 5 2 7
"""
from pandas.core.computation.eval import eval as _eval
inplace = validate_bool_kwarg(inplace, "inplace")
resolvers = kwargs.pop("resolvers", None)
kwargs["level"] = kwargs.pop("level", 0) + 1
if resolvers is None:
index_resolvers = self._get_index_resolvers()
column_resolvers = self._get_cleaned_column_resolvers()
resolvers = column_resolvers, index_resolvers
if "target" not in kwargs:
kwargs["target"] = self
kwargs["resolvers"] = kwargs.get("resolvers", ()) + tuple(resolvers)
return _eval(expr, inplace=inplace, **kwargs)
def select_dtypes(self, include=None, exclude=None) -> "DataFrame":
"""
Return a subset of the DataFrame's columns based on the column dtypes.
Parameters
----------
include, exclude : scalar or list-like
A selection of dtypes or strings to be included/excluded. At least
one of these parameters must be supplied.
Returns
-------
DataFrame
The subset of the frame including the dtypes in ``include`` and
excluding the dtypes in ``exclude``.
Raises
------
ValueError
* If both of ``include`` and ``exclude`` are empty
* If ``include`` and ``exclude`` have overlapping elements
* If any kind of string dtype is passed in.
Notes
-----
* To select all *numeric* types, use ``np.number`` or ``'number'``
* To select strings you must use the ``object`` dtype, but note that
this will return *all* object dtype columns
* See the `numpy dtype hierarchy
<http://docs.scipy.org/doc/numpy/reference/arrays.scalars.html>`__
* To select datetimes, use ``np.datetime64``, ``'datetime'`` or
``'datetime64'``
* To select timedeltas, use ``np.timedelta64``, ``'timedelta'`` or
``'timedelta64'``
* To select Pandas categorical dtypes, use ``'category'``
* To select Pandas datetimetz dtypes, use ``'datetimetz'`` (new in
0.20.0) or ``'datetime64[ns, tz]'``
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2] * 3,
... 'b': [True, False] * 3,
... 'c': [1.0, 2.0] * 3})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
2 1 True 1.0
3 2 False 2.0
4 1 True 1.0
5 2 False 2.0
>>> df.select_dtypes(include='bool')
b
0 True
1 False
2 True
3 False
4 True
5 False
>>> df.select_dtypes(include=['float64'])
c
0 1.0
1 2.0
2 1.0
3 2.0
4 1.0
5 2.0
>>> df.select_dtypes(exclude=['int'])
b c
0 True 1.0
1 False 2.0
2 True 1.0
3 False 2.0
4 True 1.0
5 False 2.0
"""
if not is_list_like(include):
include = (include,) if include is not None else ()
if not is_list_like(exclude):
exclude = (exclude,) if exclude is not None else ()
selection = (frozenset(include), frozenset(exclude))
if not any(selection):
raise ValueError("at least one of include or exclude must be nonempty")
# convert the myriad valid dtypes object to a single representation
include = frozenset(infer_dtype_from_object(x) for x in include)
exclude = frozenset(infer_dtype_from_object(x) for x in exclude)
for dtypes in (include, exclude):
invalidate_string_dtypes(dtypes)
# can't both include AND exclude!
if not include.isdisjoint(exclude):
raise ValueError(f"include and exclude overlap on {(include & exclude)}")
# We raise when both include and exclude are empty
# Hence, we can just shrink the columns we want to keep
keep_these = np.full(self.shape[1], True)
def extract_unique_dtypes_from_dtypes_set(
dtypes_set: FrozenSet[Dtype], unique_dtypes: np.ndarray
) -> List[Dtype]:
extracted_dtypes = [
unique_dtype
for unique_dtype in unique_dtypes
if issubclass(unique_dtype.type, tuple(dtypes_set)) # type: ignore
]
return extracted_dtypes
unique_dtypes = self.dtypes.unique()
if include:
included_dtypes = extract_unique_dtypes_from_dtypes_set(
include, unique_dtypes
)
keep_these &= self.dtypes.isin(included_dtypes)
if exclude:
excluded_dtypes = extract_unique_dtypes_from_dtypes_set(
exclude, unique_dtypes
)
keep_these &= ~self.dtypes.isin(excluded_dtypes)
return self.iloc[:, keep_these.values]
def insert(self, loc, column, value, allow_duplicates=False) -> None:
"""
Insert column into DataFrame at specified location.
Raises a ValueError if `column` is already contained in the DataFrame,
unless `allow_duplicates` is set to True.
Parameters
----------
loc : int
Insertion index. Must verify 0 <= loc <= len(columns).
column : str, number, or hashable object
Label of the inserted column.
value : int, Series, or array-like
allow_duplicates : bool, optional
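Examples
--------
A small illustration (the frame and inserted values below are assumed
only for demonstration):
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.insert(1, 'newcol', [99, 99])
>>> list(df.columns)
['col1', 'newcol', 'col2']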
"""
self._ensure_valid_index(value)
value = self._sanitize_column(column, value, broadcast=False)
self._data.insert(loc, column, value, allow_duplicates=allow_duplicates)
def assign(self, **kwargs) -> "DataFrame":
r"""
Assign new columns to a DataFrame.
Returns a new object with all original columns in addition to new ones.
Existing columns that are re-assigned will be overwritten.
Parameters
----------
**kwargs : dict of {str: callable or Series}
The column names are keywords. If the values are
callable, they are computed on the DataFrame and
assigned to the new columns. The callable must not
change the input DataFrame (though pandas doesn't check it).
If the values are not callable, (e.g. a Series, scalar, or array),
they are simply assigned.
Returns
-------
DataFrame
A new DataFrame with the new columns in addition to
all the existing columns.
Notes
-----
Assigning multiple columns within the same ``assign`` is possible.
Later items in '\*\*kwargs' may refer to newly created or modified
columns in 'df'; items are computed and assigned into 'df' in order.
.. versionchanged:: 0.23.0
Keyword argument order is maintained.
Examples
--------
>>> df = pd.DataFrame({'temp_c': [17.0, 25.0]},
... index=['Portland', 'Berkeley'])
>>> df
temp_c
Portland 17.0
Berkeley 25.0
Where the value is a callable, evaluated on `df`:
>>> df.assign(temp_f=lambda x: x.temp_c * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
Alternatively, the same behavior can be achieved by directly
referencing an existing Series or sequence:
>>> df.assign(temp_f=df['temp_c'] * 9 / 5 + 32)
temp_c temp_f
Portland 17.0 62.6
Berkeley 25.0 77.0
You can create multiple columns within the same assign where one
of the columns depends on another one defined within the same assign:
>>> df.assign(temp_f=lambda x: x['temp_c'] * 9 / 5 + 32,
... temp_k=lambda x: (x['temp_f'] + 459.67) * 5 / 9)
temp_c temp_f temp_k
Portland 17.0 62.6 290.15
Berkeley 25.0 77.0 298.15
"""
data = self.copy()
for k, v in kwargs.items():
data[k] = com.apply_if_callable(v, data)
return data
def _sanitize_column(self, key, value, broadcast=True):
"""
Ensures new columns (which go into the BlockManager as new blocks) are
always copied and converted into an array.
Parameters
----------
key : object
value : scalar, Series, or array-like
broadcast : bool, default True
If ``key`` matches multiple duplicate column names in the
DataFrame, this parameter indicates whether ``value`` should be
tiled so that the returned array contains a (duplicated) column for
each occurrence of the key. If False, ``value`` will not be tiled.
Returns
-------
numpy.ndarray
"""
def reindexer(value):
# reindex if necessary
if value.index.equals(self.index) or not len(self.index):
value = value._values.copy()
else:
# GH 4107
try:
value = value.reindex(self.index)._values
except ValueError as err:
# raised in MultiIndex.from_tuples, see test_insert_error_msmgs
if not value.index.is_unique:
# duplicate axis
raise err
# other
raise TypeError(
"incompatible index of inserted column with frame index"
)
return value
if isinstance(value, Series):
value = reindexer(value)
elif isinstance(value, DataFrame):
# align right-hand-side columns if self.columns
# is multi-index and self[key] is a sub-frame
if isinstance(self.columns, ABCMultiIndex) and key in self.columns:
loc = self.columns.get_loc(key)
if isinstance(loc, (slice, Series, np.ndarray, Index)):
cols = maybe_droplevels(self.columns[loc], key)
if len(cols) and not cols.equals(value.columns):
value = value.reindex(cols, axis=1)
# now align rows
value = reindexer(value).T
elif isinstance(value, ExtensionArray):
# Explicitly copy here, instead of in sanitize_index,
# as sanitize_index won't copy an EA, even with copy=True
value = value.copy()
value = sanitize_index(value, self.index, copy=False)
elif isinstance(value, Index) or is_sequence(value):
# turn me into an ndarray
value = sanitize_index(value, self.index, copy=False)
if not isinstance(value, (np.ndarray, Index)):
if isinstance(value, list) and len(value) > 0:
value = maybe_convert_platform(value)
else:
value = com.asarray_tuplesafe(value)
elif value.ndim == 2:
value = value.copy().T
elif isinstance(value, Index):
value = value.copy(deep=True)
else:
value = value.copy()
# possibly infer to datetimelike
if is_object_dtype(value.dtype):
value = maybe_infer_to_datetimelike(value)
else:
# cast ignores pandas dtypes. so save the dtype first
infer_dtype, _ = infer_dtype_from_scalar(value, pandas_dtype=True)
# upcast
value = cast_scalar_to_array(len(self.index), value)
value = maybe_cast_to_datetime(value, infer_dtype)
# return internal types directly
if is_extension_array_dtype(value):
return value
# broadcast across multiple columns if necessary
if broadcast and key in self.columns and value.ndim == 1:
if not self.columns.is_unique or isinstance(self.columns, ABCMultiIndex):
existing_piece = self[key]
if isinstance(existing_piece, DataFrame):
value = np.tile(value, (len(existing_piece.columns), 1))
return np.atleast_2d(np.asarray(value))
@property
def _series(self):
return {
item: Series(self._data.iget(idx), index=self.index, name=item)
for idx, item in enumerate(self.columns)
}
def lookup(self, row_labels, col_labels) -> np.ndarray:
"""
Label-based "fancy indexing" function for DataFrame.
Given equal-length arrays of row and column labels, return an
array of the values corresponding to each (row, col) pair.
Parameters
----------
row_labels : sequence
The row labels to use for lookup.
col_labels : sequence
The column labels to use for lookup.
Returns
-------
numpy.ndarray
The found values.
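Examples
--------
A minimal sketch (the labels and values below are assumed only for
demonstration):
>>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}, index=['x', 'y'])
>>> df.lookup(['x', 'y'], ['A', 'B'])
array([1, 4])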
"""
n = len(row_labels)
if n != len(col_labels):
raise ValueError("Row labels must have same size as column labels")
thresh = 1000
if not self._is_mixed_type or n > thresh:
values = self.values
ridx = self.index.get_indexer(row_labels)
cidx = self.columns.get_indexer(col_labels)
if (ridx == -1).any():
raise KeyError("One or more row labels was not found")
if (cidx == -1).any():
raise KeyError("One or more column labels was not found")
flat_index = ridx * len(self.columns) + cidx
result = values.flat[flat_index]
else:
result = np.empty(n, dtype="O")
for i, (r, c) in enumerate(zip(row_labels, col_labels)):
result[i] = self._get_value(r, c)
if is_object_dtype(result):
result = lib.maybe_convert_objects(result)
return result
# ----------------------------------------------------------------------
# Reindexing and alignment
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
frame = self
columns = axes["columns"]
if columns is not None:
frame = frame._reindex_columns(
columns, method, copy, level, fill_value, limit, tolerance
)
index = axes["index"]
if index is not None:
frame = frame._reindex_index(
index, method, copy, level, fill_value, limit, tolerance
)
return frame
def _reindex_index(
self,
new_index,
method,
copy,
level,
fill_value=np.nan,
limit=None,
tolerance=None,
):
new_index, indexer = self.index.reindex(
new_index, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{0: [new_index, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_columns(
self,
new_columns,
method,
copy,
level,
fill_value=None,
limit=None,
tolerance=None,
):
new_columns, indexer = self.columns.reindex(
new_columns, method=method, level=level, limit=limit, tolerance=tolerance
)
return self._reindex_with_indexers(
{1: [new_columns, indexer]},
copy=copy,
fill_value=fill_value,
allow_dups=False,
)
def _reindex_multi(self, axes, copy, fill_value) -> "DataFrame":
"""
We are guaranteed non-Nones in the axes.
"""
new_index, row_indexer = self.index.reindex(axes["index"])
new_columns, col_indexer = self.columns.reindex(axes["columns"])
if row_indexer is not None and col_indexer is not None:
indexer = row_indexer, col_indexer
new_values = algorithms.take_2d_multi(
self.values, indexer, fill_value=fill_value
)
return self._constructor(new_values, index=new_index, columns=new_columns)
else:
return self._reindex_with_indexers(
{0: [new_index, row_indexer], 1: [new_columns, col_indexer]},
copy=copy,
fill_value=fill_value,
)
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
) -> "DataFrame":
return super().align(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
broadcast_axis=broadcast_axis,
)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.reindex.__doc__)
@rewrite_axis_style_signature(
"labels",
[
("method", None),
("copy", True),
("level", None),
("fill_value", np.nan),
("limit", None),
("tolerance", None),
],
)
def reindex(self, *args, **kwargs) -> "DataFrame":
axes = validate_axis_style_args(self, args, kwargs, "labels", "reindex")
kwargs.update(axes)
# Pop these, since the values are in `kwargs` under different names
kwargs.pop("axis", None)
kwargs.pop("labels", None)
return self._ensure_type(super().reindex(**kwargs))
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
"""
Drop specified labels from rows or columns.
Remove rows or columns by specifying label names and corresponding
axis, or by specifying directly index or column names. When using a
multi-index, labels on different levels can be removed by specifying
the level.
Parameters
----------
labels : single label or list-like
Index or column labels to drop.
axis : {0 or 'index', 1 or 'columns'}, default 0
Whether to drop labels from the index (0 or 'index') or
columns (1 or 'columns').
index : single label or list-like
Alternative to specifying axis (``labels, axis=0``
is equivalent to ``index=labels``).
.. versionadded:: 0.21.0
columns : single label or list-like
Alternative to specifying axis (``labels, axis=1``
is equivalent to ``columns=labels``).
.. versionadded:: 0.21.0
level : int or level name, optional
For MultiIndex, level from which the labels will be removed.
inplace : bool, default False
If True, do operation inplace and return None.
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and only existing labels are
dropped.
Returns
-------
DataFrame
DataFrame without the removed index or column labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis.
See Also
--------
DataFrame.loc : Label-location based indexer for selection by label.
DataFrame.dropna : Return DataFrame with labels on given axis omitted
where (all or any) data are missing.
DataFrame.drop_duplicates : Return DataFrame with duplicate rows
removed, optionally only considering certain columns.
Series.drop : Return Series with specified index labels removed.
Examples
--------
>>> df = pd.DataFrame(np.arange(12).reshape(3, 4),
... columns=['A', 'B', 'C', 'D'])
>>> df
A B C D
0 0 1 2 3
1 4 5 6 7
2 8 9 10 11
Drop columns
>>> df.drop(['B', 'C'], axis=1)
A D
0 0 3
1 4 7
2 8 11
>>> df.drop(columns=['B', 'C'])
A D
0 0 3
1 4 7
2 8 11
Drop a row by index
>>> df.drop([0, 1])
A B C D
2 8 9 10 11
Drop columns and/or rows of MultiIndex DataFrame
>>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'],
... ['speed', 'weight', 'length']],
... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2],
... [0, 1, 2, 0, 1, 2, 0, 1, 2]])
>>> df = pd.DataFrame(index=midx, columns=['big', 'small'],
... data=[[45, 30], [200, 100], [1.5, 1], [30, 20],
... [250, 150], [1.5, 0.8], [320, 250],
... [1, 0.8], [0.3, 0.2]])
>>> df
big small
lama speed 45.0 30.0
weight 200.0 100.0
length 1.5 1.0
cow speed 30.0 20.0
weight 250.0 150.0
length 1.5 0.8
falcon speed 320.0 250.0
weight 1.0 0.8
length 0.3 0.2
>>> df.drop(index='cow', columns='small')
big
lama speed 45.0
weight 200.0
length 1.5
falcon speed 320.0
weight 1.0
length 0.3
>>> df.drop(index='length', level=1)
big small
lama speed 45.0 30.0
weight 200.0 100.0
cow speed 30.0 20.0
weight 250.0 150.0
falcon speed 320.0 250.0
weight 1.0 0.8
"""
return super().drop(
labels=labels,
axis=axis,
index=index,
columns=columns,
level=level,
inplace=inplace,
errors=errors,
)
@rewrite_axis_style_signature(
"mapper",
[("copy", True), ("inplace", False), ("level", None), ("errors", "ignore")],
)
def rename(
self,
mapper: Optional[Renamer] = None,
*,
index: Optional[Renamer] = None,
columns: Optional[Renamer] = None,
axis: Optional[Axis] = None,
copy: bool = True,
inplace: bool = False,
level: Optional[Level] = None,
errors: str = "ignore",
) -> Optional["DataFrame"]:
"""
Alter axes labels.
Function / dict values must be unique (1-to-1). Labels not contained in
a dict / Series will be left as-is. Extra labels listed don't throw an
error.
See the :ref:`user guide <basics.rename>` for more.
Parameters
----------
mapper : dict-like or function
Dict-like or functions transformations to apply to
that axis' values. Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index`` and
``columns``.
index : dict-like or function
Alternative to specifying axis (``mapper, axis=0``
is equivalent to ``index=mapper``).
columns : dict-like or function
Alternative to specifying axis (``mapper, axis=1``
is equivalent to ``columns=mapper``).
axis : int or str
Axis to target with ``mapper``. Can be either the axis name
('index', 'columns') or number (0, 1). The default is 'index'.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new DataFrame. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
DataFrame
DataFrame with the renamed axis labels.
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
DataFrame.rename_axis : Set the name of the axis.
Examples
--------
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Rename columns using a mapping:
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
Rename index using a mapping:
>>> df.rename(index={0: "x", 1: "y", 2: "z"})
A B
x 1 4
y 2 5
z 3 6
Cast index labels to a different type:
>>> df.index
RangeIndex(start=0, stop=3, step=1)
>>> df.rename(index=str).index
Index(['0', '1', '2'], dtype='object')
>>> df.rename(columns={"A": "a", "B": "b", "C": "c"}, errors="raise")
Traceback (most recent call last):
KeyError: ['C'] not found in axis
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
"""
return super().rename(
mapper=mapper,
index=index,
columns=columns,
axis=axis,
copy=copy,
inplace=inplace,
level=level,
errors=errors,
)
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.fillna.__doc__)
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
) -> Optional["DataFrame"]:
return super().fillna(
value=value,
method=method,
axis=axis,
inplace=inplace,
limit=limit,
downcast=downcast,
)
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
return super().replace(
to_replace=to_replace,
value=value,
inplace=inplace,
limit=limit,
regex=regex,
method=method,
)
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None) -> "DataFrame":
return self._ensure_type(
super().shift(periods=periods, freq=freq, axis=axis, fill_value=fill_value)
)
def set_index(
self, keys, drop=True, append=False, inplace=False, verify_integrity=False
):
"""
Set the DataFrame index using existing columns.
Set the DataFrame index (row labels) using one or more existing
columns or arrays (of the correct length). The index can replace the
existing index or expand on it.
Parameters
----------
keys : label or array-like or list of labels/arrays
This parameter can be either a single column key, a single array of
the same length as the calling DataFrame, or a list containing an
arbitrary combination of column keys and arrays. Here, "array"
encompasses :class:`Series`, :class:`Index`, ``np.ndarray``, and
instances of :class:`~collections.abc.Iterator`.
drop : bool, default True
Delete columns to be used as the new index.
append : bool, default False
Whether to append columns to existing index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
verify_integrity : bool, default False
Check the new index for duplicates. Otherwise defer the check until
necessary. Setting to False will improve the performance of this
method.
Returns
-------
DataFrame
Changed row labels.
See Also
--------
DataFrame.reset_index : Opposite of set_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame({'month': [1, 4, 7, 10],
... 'year': [2012, 2014, 2013, 2014],
... 'sale': [55, 40, 84, 31]})
>>> df
month year sale
0 1 2012 55
1 4 2014 40
2 7 2013 84
3 10 2014 31
Set the index to become the 'month' column:
>>> df.set_index('month')
year sale
month
1 2012 55
4 2014 40
7 2013 84
10 2014 31
Create a MultiIndex using columns 'year' and 'month':
>>> df.set_index(['year', 'month'])
sale
year month
2012 1 55
2014 4 40
2013 7 84
2014 10 31
Create a MultiIndex using an Index and a column:
>>> df.set_index([pd.Index([1, 2, 3, 4]), 'year'])
month sale
year
1 2012 1 55
2 2014 4 40
3 2013 7 84
4 2014 10 31
Create a MultiIndex using two Series:
>>> s = pd.Series([1, 2, 3, 4])
>>> df.set_index([s, s**2])
month year sale
1 1 1 2012 55
2 4 4 2014 40
3 9 7 2013 84
4 16 10 2014 31
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if not isinstance(keys, list):
keys = [keys]
err_msg = (
'The parameter "keys" may be a column key, one-dimensional '
"array, or a list containing only valid column keys and "
"one-dimensional arrays."
)
missing: List[Optional[Hashable]] = []
for col in keys:
if isinstance(
col, (ABCIndexClass, ABCSeries, np.ndarray, list, abc.Iterator)
):
# arrays are fine as long as they are one-dimensional
# iterators get converted to list below
if getattr(col, "ndim", 1) != 1:
raise ValueError(err_msg)
else:
# everything else gets tried as a key; see GH 24969
try:
found = col in self.columns
except TypeError:
raise TypeError(f"{err_msg}. Received column of type {type(col)}")
else:
if not found:
missing.append(col)
if missing:
raise KeyError(f"None of {missing} are in the columns")
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = list(self.index.names)
if isinstance(self.index, ABCMultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove: List[Optional[Hashable]] = []
for col in keys:
if isinstance(col, ABCMultiIndex):
for n in range(col.nlevels):
arrays.append(col._get_level_values(n))
names.extend(col.names)
elif isinstance(col, (ABCIndexClass, ABCSeries)):
# if Index then not MultiIndex (treated above)
arrays.append(col)
names.append(col.name)
elif isinstance(col, (list, np.ndarray)):
arrays.append(col)
names.append(None)
elif isinstance(col, abc.Iterator):
arrays.append(list(col))
names.append(None)
# from here, col can only be a column label
else:
arrays.append(frame[col]._values)
names.append(col)
if drop:
to_remove.append(col)
if len(arrays[-1]) != len(self):
# check newest element against length of calling frame, since
# ensure_index_from_sequences would not raise for append=False.
raise ValueError(
f"Length mismatch: Expected {len(self)} rows, "
f"received array of length {len(arrays[-1])}"
)
index = ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index[index.duplicated()].unique()
raise ValueError(f"Index has duplicate keys: {duplicates}")
# use set to handle duplicate column names gracefully in case of drop
for c in set(to_remove):
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
def reset_index(
self,
level: Optional[Union[Hashable, Sequence[Hashable]]] = None,
drop: bool = False,
inplace: bool = False,
col_level: Hashable = 0,
col_fill: Optional[Hashable] = "",
) -> Optional["DataFrame"]:
"""
Reset the index, or a level of it.
Reset the index of the DataFrame, and use the default one instead.
If the DataFrame has a MultiIndex, this method can remove one or more
levels.
Parameters
----------
level : int, str, tuple, or list, default None
Only remove the given levels from the index. Removes all levels by
default.
drop : bool, default False
Do not try to insert index into dataframe columns. This resets
the index to the default integer index.
inplace : bool, default False
Modify the DataFrame in place (do not create a new object).
col_level : int or str, default 0
If the columns have multiple levels, determines which level the
labels are inserted into. By default it is inserted into the first
level.
col_fill : object, default ''
If the columns have multiple levels, determines how the other
levels are named. If None then the index name is repeated.
Returns
-------
DataFrame or None
DataFrame with the new index or None if ``inplace=True``.
See Also
--------
DataFrame.set_index : Opposite of reset_index.
DataFrame.reindex : Change to new indices or expand indices.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
>>> df = pd.DataFrame([('bird', 389.0),
... ('bird', 24.0),
... ('mammal', 80.5),
... ('mammal', np.nan)],
... index=['falcon', 'parrot', 'lion', 'monkey'],
... columns=('class', 'max_speed'))
>>> df
class max_speed
falcon bird 389.0
parrot bird 24.0
lion mammal 80.5
monkey mammal NaN
When we reset the index, the old index is added as a column, and a
new sequential index is used:
>>> df.reset_index()
index class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
We can use the `drop` parameter to avoid the old index being added as
a column:
>>> df.reset_index(drop=True)
class max_speed
0 bird 389.0
1 bird 24.0
2 mammal 80.5
3 mammal NaN
You can also use `reset_index` with `MultiIndex`.
>>> index = pd.MultiIndex.from_tuples([('bird', 'falcon'),
... ('bird', 'parrot'),
... ('mammal', 'lion'),
... ('mammal', 'monkey')],
... names=['class', 'name'])
>>> columns = pd.MultiIndex.from_tuples([('speed', 'max'),
... ('species', 'type')])
>>> df = pd.DataFrame([(389.0, 'fly'),
... ( 24.0, 'fly'),
... ( 80.5, 'run'),
... (np.nan, 'jump')],
... index=index,
... columns=columns)
>>> df
speed species
max type
class name
bird falcon 389.0 fly
parrot 24.0 fly
mammal lion 80.5 run
monkey NaN jump
If the index has multiple levels, we can reset a subset of them:
>>> df.reset_index(level='class')
class speed species
max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we are not dropping the index, by default, it is placed in the top
level. We can place it in another level:
>>> df.reset_index(level='class', col_level=1)
speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
When the index is inserted under another level, we can specify under
which one with the parameter `col_fill`:
>>> df.reset_index(level='class', col_level=1, col_fill='species')
species speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
If we specify a nonexistent level for `col_fill`, it is created:
>>> df.reset_index(level='class', col_level=1, col_fill='genus')
genus speed species
class max type
name
falcon bird 389.0 fly
parrot bird 24.0 fly
lion mammal 80.5 run
monkey mammal NaN jump
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
values = index._values
if not isinstance(index, (PeriodIndex, DatetimeIndex)):
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
# TODO(https://github.com/pandas-dev/pandas/issues/24206)
# Push this into maybe_upcast_putmask?
# We can't pass EAs there right now. Looks a bit
# complicated.
# So we unbox the ndarray_values, op, re-box.
values_type = type(values)
values_dtype = values.dtype
if issubclass(values_type, DatetimeLikeArray):
values = values._data
if mask.any():
values, _ = maybe_upcast_putmask(values, mask, np.nan)
if issubclass(values_type, DatetimeLikeArray):
values = values_type(values, dtype=values_dtype)
return values
new_index = ibase.default_index(len(new_obj))
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
to_insert: Iterable[Tuple[Any, Optional[Any]]]
if isinstance(self.index, ABCMultiIndex):
names = [
(n if n is not None else f"level_{i}")
for i, n in enumerate(self.index.names)
]
to_insert = zip(self.index.levels, self.index.codes)
else:
default = "index" if "index" not in self else "level_0"
names = [default] if self.index.name is None else [self.index.name]
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, ABCMultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = list(name) if isinstance(name, tuple) else [name]
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError(
"col_fill=None is incompatible "
f"with incomplete column name {name}"
)
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
return None
# ----------------------------------------------------------------------
# Reindex-based selection methods
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self) -> "DataFrame":
return super().isna()
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self) -> "DataFrame":
return super().isnull()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self) -> "DataFrame":
return super().notna()
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self) -> "DataFrame":
return super().notnull()
def dropna(self, axis=0, how="any", thresh=None, subset=None, inplace=False):
"""
Remove missing values.
See the :ref:`User Guide <missing_data>` for more on which values are
considered missing, and how to work with missing data.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Determine if rows or columns which contain missing values are
removed.
* 0, or 'index' : Drop rows which contain missing values.
* 1, or 'columns' : Drop columns which contain missing values.
.. versionchanged:: 1.0.0
Passing a tuple or list of axes is no longer supported;
only a single axis is allowed.
how : {'any', 'all'}, default 'any'
Determine if row or column is removed from DataFrame, when we have
at least one NA or all NA.
* 'any' : If any NA values are present, drop that row or column.
* 'all' : If all values are NA, drop that row or column.
thresh : int, optional
Require that many non-NA values.
subset : array-like, optional
Labels along other axis to consider, e.g. if you are dropping rows
these would be a list of columns to include.
inplace : bool, default False
If True, do operation inplace and return None.
Returns
-------
DataFrame
DataFrame with NA entries dropped from it.
See Also
--------
DataFrame.isna: Indicate missing values.
DataFrame.notna : Indicate existing (non-missing) values.
DataFrame.fillna : Replace missing values.
Series.dropna : Drop missing values.
Index.dropna : Drop missing indices.
Examples
--------
>>> df = pd.DataFrame({"name": ['Alfred', 'Batman', 'Catwoman'],
... "toy": [np.nan, 'Batmobile', 'Bullwhip'],
... "born": [pd.NaT, pd.Timestamp("1940-04-25"),
... pd.NaT]})
>>> df
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Drop the rows where at least one element is missing.
>>> df.dropna()
name toy born
1 Batman Batmobile 1940-04-25
Drop the columns where at least one element is missing.
>>> df.dropna(axis='columns')
name
0 Alfred
1 Batman
2 Catwoman
Drop the rows where all elements are missing.
>>> df.dropna(how='all')
name toy born
0 Alfred NaN NaT
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Keep only the rows with at least 2 non-NA values.
>>> df.dropna(thresh=2)
name toy born
1 Batman Batmobile 1940-04-25
2 Catwoman Bullwhip NaT
Define in which columns to look for missing values.
>>> df.dropna(subset=['name', 'born'])
name toy born
1 Batman Batmobile 1940-04-25
Keep the DataFrame with valid entries in the same variable.
>>> df.dropna(inplace=True)
>>> df
name toy born
1 Batman Batmobile 1940-04-25
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if isinstance(axis, (tuple, list)):
# GH20987
raise TypeError("supplying multiple axes to axis is no longer supported.")
axis = self._get_axis_number(axis)
agg_axis = 1 - axis
agg_obj = self
if subset is not None:
ax = self._get_axis(agg_axis)
indices = ax.get_indexer_for(subset)
check = indices == -1
if check.any():
raise KeyError(list(np.compress(check, subset)))
agg_obj = self.take(indices, axis=agg_axis)
count = agg_obj.count(axis=agg_axis)
if thresh is not None:
mask = count >= thresh
elif how == "any":
mask = count == len(agg_obj._get_axis(agg_axis))
elif how == "all":
mask = count > 0
else:
if how is not None:
raise ValueError(f"invalid how option: {how}")
else:
raise TypeError("must specify how or thresh")
result = self.loc(axis=axis)[mask]
if inplace:
self._update_inplace(result)
else:
return result
def drop_duplicates(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
inplace: bool = False,
ignore_index: bool = False,
) -> Optional["DataFrame"]:
"""
Return DataFrame with duplicate rows removed.
Considering certain columns is optional. Indexes, including time indexes
are ignored.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to keep.
- ``first`` : Drop duplicates except for the first occurrence.
- ``last`` : Drop duplicates except for the last occurrence.
- False : Drop all duplicates.
inplace : bool, default False
Whether to drop duplicates in place or to return a copy.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
DataFrame
DataFrame with duplicates removed or None if ``inplace=True``.
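Examples
--------
A small illustration (the frame below is assumed only for
demonstration):
>>> df = pd.DataFrame({'a': [1, 1, 2], 'b': [3, 3, 4]})
>>> df.drop_duplicates()
   a  b
0  1  3
2  2  4
>>> df.drop_duplicates(keep='last')
   a  b
1  1  3
2  2  4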
"""
if self.empty:
return self.copy()
inplace = validate_bool_kwarg(inplace, "inplace")
duplicated = self.duplicated(subset, keep=keep)
if inplace:
(inds,) = (-duplicated)._ndarray_values.nonzero()
new_data = self._data.take(inds)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(inds))
self._update_inplace(new_data)
else:
result = self[-duplicated]
if ignore_index:
result.index = ibase.default_index(len(result))
return result
return None
def duplicated(
self,
subset: Optional[Union[Hashable, Sequence[Hashable]]] = None,
keep: Union[str, bool] = "first",
) -> "Series":
"""
Return boolean Series denoting duplicate rows.
Considering certain columns is optional.
Parameters
----------
subset : column label or sequence of labels, optional
Only consider certain columns for identifying duplicates, by
default use all of the columns.
keep : {'first', 'last', False}, default 'first'
Determines which duplicates (if any) to mark.
- ``first`` : Mark duplicates as ``True`` except for the first occurrence.
- ``last`` : Mark duplicates as ``True`` except for the last occurrence.
- False : Mark all duplicates as ``True``.
Returns
-------
Series
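Examples
--------
A small illustration (the frame below is assumed only for
demonstration):
>>> df = pd.DataFrame({'a': [1, 1, 2]})
>>> df.duplicated()
0    False
1     True
2    False
dtype: bool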
"""
from pandas.core.sorting import get_group_index
from pandas._libs.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
if self.empty:
return Series(dtype=bool)
def f(vals):
labels, shape = algorithms.factorize(
vals, size_hint=min(len(self), _SIZE_HINT_LIMIT)
)
return labels.astype("i8", copy=False), len(shape)
if subset is None:
subset = self.columns
elif (
not np.iterable(subset)
or isinstance(subset, str)
or isinstance(subset, tuple)
and subset in self.columns
):
subset = (subset,)
# needed for mypy since can't narrow types using np.iterable
subset = cast(Iterable, subset)
# Verify all columns in subset exist in the queried dataframe
# Otherwise, raise a KeyError, same as if you try to __getitem__ with a
# key that doesn't exist.
diff = Index(subset).difference(self.columns)
if not diff.empty:
raise KeyError(diff)
vals = (col.values for name, col in self.items() if name in subset)
labels, shape = map(list, zip(*map(f, vals)))
ids = get_group_index(labels, shape, sort=False, xnull=False)
return Series(duplicated_int64(ids, keep), index=self.index)
# ----------------------------------------------------------------------
# Sorting
@Substitution(**_shared_doc_kwargs)
@Appender(NDFrame.sort_values.__doc__)
def sort_values(
self,
by,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
ignore_index=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
if not isinstance(by, list):
by = [by]
if is_sequence(ascending) and len(by) != len(ascending):
raise ValueError(
f"Length of ascending ({len(ascending)}) != length of by ({len(by)})"
)
if len(by) > 1:
from pandas.core.sorting import lexsort_indexer
keys = [self._get_label_or_level_values(x, axis=axis) for x in by]
indexer = lexsort_indexer(keys, orders=ascending, na_position=na_position)
indexer = ensure_platform_int(indexer)
else:
from pandas.core.sorting import nargsort
by = by[0]
k = self._get_label_or_level_values(by, axis=axis)
if isinstance(ascending, (tuple, list)):
ascending = ascending[0]
indexer = nargsort(
k, kind=kind, ascending=ascending, na_position=na_position
)
new_data = self._data.take(
indexer, axis=self._get_block_manager_axis(axis), verify=False
)
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def sort_index(
self,
axis=0,
level=None,
ascending: bool = True,
inplace: bool = False,
kind: str = "quicksort",
na_position: str = "last",
sort_remaining: bool = True,
ignore_index: bool = False,
):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also :func:`numpy.sort` for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
ignore_index : bool, default False
If True, the resulting axis will be labeled 0, 1, …, n - 1.
.. versionadded:: 1.0.0
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
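Examples
--------
A minimal sketch (the frame below is assumed only for demonstration):
>>> df = pd.DataFrame({'a': [1, 2, 3]}, index=['c', 'a', 'b'])
>>> df.sort_index()
   a
a  2
b  3
c  1
>>> df.sort_index(ascending=False)
   a
c  1
b  3
a  2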
"""
# TODO: this can be combined with Series.sort_index impl as
# almost identical
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
# make sure that the axis is lexsorted to start
# if not we need to reconstruct to get the correct indexer
labels = labels._sort_levels_monotonic()
if level is not None:
new_axis, indexer = labels.sortlevel(
level, ascending=ascending, sort_remaining=sort_remaining
)
elif isinstance(labels, ABCMultiIndex):
from pandas.core.sorting import lexsort_indexer
indexer = lexsort_indexer(
labels._get_codes_for_sorting(),
orders=ascending,
na_position=na_position,
)
else:
from pandas.core.sorting import nargsort
# Check monotonic-ness before sort an index
# GH11080
if (ascending and labels.is_monotonic_increasing) or (
not ascending and labels.is_monotonic_decreasing
):
if inplace:
return
else:
return self.copy()
indexer = nargsort(
labels, kind=kind, ascending=ascending, na_position=na_position
)
baxis = self._get_block_manager_axis(axis)
new_data = self._data.take(indexer, axis=baxis, verify=False)
# reconstruct axis if needed
new_data.axes[baxis] = new_data.axes[baxis]._sort_levels_monotonic()
if ignore_index:
new_data.axes[1] = ibase.default_index(len(indexer))
if inplace:
return self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def nlargest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in descending order.
Return the first `n` rows with the largest values in `columns`, in
descending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=False).head(n)``, but more
performant.
Parameters
----------
n : int
Number of rows to return.
columns : label or list of labels
Column label(s) to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- `first` : prioritize the first occurrence(s)
- `last` : prioritize the last occurrence(s)
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The first `n` rows ordered by the given columns in descending
order.
See Also
--------
DataFrame.nsmallest : Return the first `n` rows ordered by `columns` in
ascending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Notes
-----
This function cannot be used with all column types. For example, when
specifying columns with `object` or `category` dtypes, ``TypeError`` is
raised.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nlargest`` to select the three
rows having the largest values in column "population".
>>> df.nlargest(3, 'population')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nlargest(3, 'population', keep='last')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nlargest(3, 'population', keep='all')
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
To order by the largest values in column "population" and then "GDP",
we can specify multiple columns like in the next example.
>>> df.nlargest(3, ['population', 'GDP'])
population GDP alpha-2
France 65000000 2583560 FR
Italy 59000000 1937894 IT
Brunei 434000 12128 BN
"""
return algorithms.SelectNFrame(self, n=n, keep=keep, columns=columns).nlargest()
def nsmallest(self, n, columns, keep="first") -> "DataFrame":
"""
Return the first `n` rows ordered by `columns` in ascending order.
Return the first `n` rows with the smallest values in `columns`, in
ascending order. The columns that are not specified are returned as
well, but not used for ordering.
This method is equivalent to
``df.sort_values(columns, ascending=True).head(n)``, but more
performant.
Parameters
----------
n : int
Number of items to retrieve.
columns : list or str
Column name or names to order by.
keep : {'first', 'last', 'all'}, default 'first'
Where there are duplicate values:
- ``first`` : take the first occurrence.
- ``last`` : take the last occurrence.
- ``all`` : do not drop any duplicates, even if it means
selecting more than `n` items.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
See Also
--------
DataFrame.nlargest : Return the first `n` rows ordered by `columns` in
descending order.
DataFrame.sort_values : Sort DataFrame by the values.
DataFrame.head : Return the first `n` rows without re-ordering.
Examples
--------
>>> df = pd.DataFrame({'population': [59000000, 65000000, 434000,
... 434000, 434000, 337000, 11300,
... 11300, 11300],
... 'GDP': [1937894, 2583560 , 12011, 4520, 12128,
... 17036, 182, 38, 311],
... 'alpha-2': ["IT", "FR", "MT", "MV", "BN",
... "IS", "NR", "TV", "AI"]},
... index=["Italy", "France", "Malta",
... "Maldives", "Brunei", "Iceland",
... "Nauru", "Tuvalu", "Anguilla"])
>>> df
population GDP alpha-2
Italy 59000000 1937894 IT
France 65000000 2583560 FR
Malta 434000 12011 MT
Maldives 434000 4520 MV
Brunei 434000 12128 BN
Iceland 337000 17036 IS
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
In the following example, we will use ``nsmallest`` to select the
three rows having the smallest values in column "population".
>>> df.nsmallest(3, 'population')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
When using ``keep='last'``, ties are resolved in reverse order:
>>> df.nsmallest(3, 'population', keep='last')
population GDP alpha-2
Anguilla 11300 311 AI
Tuvalu 11300 38 TV
Nauru 11300 182 NR
When using ``keep='all'``, all duplicate items are maintained:
>>> df.nsmallest(3, 'population', keep='all')
population GDP alpha-2
Nauru 11300 182 NR
Tuvalu 11300 38 TV
Anguilla 11300 311 AI
To order by the smallest values in column "population" and then "GDP", we can
specify multiple columns like in the next example.
>>> df.nsmallest(3, ['population', 'GDP'])
population GDP alpha-2
Tuvalu 11300 38 TV
Nauru 11300 182 NR
Anguilla 11300 311 AI
"""
return algorithms.SelectNFrame(
self, n=n, keep=keep, columns=columns
).nsmallest()
def swaplevel(self, i=-2, j=-1, axis=0) -> "DataFrame":
"""
Swap levels i and j in a MultiIndex on a particular axis.
Parameters
----------
i, j : int or str
Levels of the indices to be swapped. Can pass level name as string.
Returns
-------
DataFrame
"""
result = self.copy()
axis = self._get_axis_number(axis)
if axis == 0:
result.index = result.index.swaplevel(i, j)
else:
result.columns = result.columns.swaplevel(i, j)
return result
def reorder_levels(self, order, axis=0) -> "DataFrame":
"""
Rearrange index levels using input order. May not drop or duplicate levels.
Parameters
----------
order : list of int or list of str
List representing new level order. Reference level by number
(position) or by key (label).
axis : int
Where to reorder levels.
Returns
-------
DataFrame
"""
axis = self._get_axis_number(axis)
if not isinstance(self._get_axis(axis), ABCMultiIndex): # pragma: no cover
raise TypeError("Can only reorder levels on a hierarchical axis.")
result = self.copy()
if axis == 0:
result.index = result.index.reorder_levels(order)
else:
result.columns = result.columns.reorder_levels(order)
return result
# ----------------------------------------------------------------------
# Arithmetic / combination related
def _combine_frame(self, other, func, fill_value=None, level=None):
# at this point we have `self._indexed_same(other)`
if fill_value is None:
# since _arith_op may be called in a loop, avoid function call
# overhead if possible by doing this check once
_arith_op = func
else:
def _arith_op(left, right):
# for the mixed_type case where we iterate over columns,
# _arith_op(left, right) is equivalent to
# left._binop(right, func, fill_value=fill_value)
left, right = ops.fill_binop(left, right, fill_value)
return func(left, right)
if ops.should_series_dispatch(self, other, func):
# iterate over columns
new_data = ops.dispatch_to_series(self, other, _arith_op)
else:
with np.errstate(all="ignore"):
res_values = _arith_op(self.values, other.values)
new_data = dispatch_fill_zeros(func, self.values, other.values, res_values)
return new_data
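# Illustrative sketch (not part of the pandas source): the ``fill_value``
# branch above fills one-sided NaNs before applying ``func``. Frame
# arithmetic with ``fill_value`` is a user-facing call that typically
# exercises this path, although the exact dispatch may differ between
# pandas versions; data below is made up.
# >>> import numpy as np
# >>> import pandas as pd
# >>> df1 = pd.DataFrame({"A": [1.0, np.nan]})
# >>> df2 = pd.DataFrame({"A": [10.0, 20.0]})
# >>> df1.add(df2, fill_value=0)
#       A
# 0  11.0
# 1  20.0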
def _combine_match_index(self, other, func):
# at this point we have `self.index.equals(other.index)`
if ops.should_series_dispatch(self, other, func):
# operate column-wise; avoid costly object-casting in `.values`
new_data = ops.dispatch_to_series(self, other, func)
else:
# fastpath --> operate directly on values
with np.errstate(all="ignore"):
new_data = func(self.values.T, other.values).T
return new_data
def _construct_result(self, result) -> "DataFrame":
"""
Wrap the result of an arithmetic, comparison, or logical operation.
Parameters
----------
result : DataFrame
Returns
-------
DataFrame
"""
out = self._constructor(result, index=self.index, copy=False)
# Pin columns instead of passing to constructor for compat with
# non-unique columns case
out.columns = self.columns
return out
def combine(
self, other: "DataFrame", func, fill_value=None, overwrite=True
) -> "DataFrame":
"""
Perform column-wise combine with another DataFrame.
Combines a DataFrame with `other` DataFrame using `func`
to element-wise combine columns. The row and column indexes of the
resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
The DataFrame to merge column-wise.
func : function
Function that takes two series as inputs and return a Series or a
scalar. Used to merge the two dataframes column by columns.
fill_value : scalar value, default None
The value to fill NaNs with prior to passing any column to the
merge func.
overwrite : bool, default True
If True, columns in `self` that do not exist in `other` will be
overwritten with NaNs.
Returns
-------
DataFrame
Combination of the provided DataFrames.
See Also
--------
DataFrame.combine_first : Combine two DataFrame objects and default to
non-null values in frame calling the method.
Examples
--------
Combine using a simple function that chooses the smaller column.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> take_smaller = lambda s1, s2: s1 if s1.sum() < s2.sum() else s2
>>> df1.combine(df2, take_smaller)
A B
0 0 3
1 0 3
Example using a true element-wise combine function.
>>> df1 = pd.DataFrame({'A': [5, 0], 'B': [2, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, np.minimum)
A B
0 1 2
1 0 3
Using `fill_value` fills Nones prior to passing the column to the
merge function.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 4.0
However, if the same element in both dataframes is None, that None
is preserved
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [None, 3]})
>>> df1.combine(df2, take_smaller, fill_value=-5)
A B
0 0 -5.0
1 0 3.0
Example that demonstrates the use of `overwrite` and behavior when
the axis differ between the dataframes.
>>> df1 = pd.DataFrame({'A': [0, 0], 'B': [4, 4]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [-10, 1], }, index=[1, 2])
>>> df1.combine(df2, take_smaller)
A B C
0 NaN NaN NaN
1 NaN 3.0 -10.0
2 NaN 3.0 1.0
>>> df1.combine(df2, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 -10.0
2 NaN 3.0 1.0
Demonstrating the preference of the passed in dataframe.
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1], }, index=[1, 2])
>>> df2.combine(df1, take_smaller)
A B C
0 0.0 NaN NaN
1 0.0 3.0 NaN
2 NaN 3.0 NaN
>>> df2.combine(df1, take_smaller, overwrite=False)
A B C
0 0.0 NaN NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
other_idxlen = len(other.index) # save for compare
this, other = self.align(other, copy=False)
new_index = this.index
if other.empty and len(new_index) == len(self.index):
return self.copy()
if self.empty and len(other) == other_idxlen:
return other.copy()
# sorts if possible
new_columns = this.columns.union(other.columns)
do_fill = fill_value is not None
result = {}
for col in new_columns:
series = this[col]
otherSeries = other[col]
this_dtype = series.dtype
other_dtype = otherSeries.dtype
this_mask = isna(series)
other_mask = isna(otherSeries)
# don't overwrite columns unnecessarily
# DO propagate if this column is not in the intersection
if not overwrite and other_mask.all():
result[col] = this[col].copy()
continue
if do_fill:
series = series.copy()
otherSeries = otherSeries.copy()
series[this_mask] = fill_value
otherSeries[other_mask] = fill_value
if col not in self.columns:
# The column exists only in `other`; the aligned series from
# `self` is all NaN, so try to cast it to `other`'s dtype.
new_dtype = other_dtype
try:
series = series.astype(new_dtype, copy=False)
except ValueError:
# e.g. new_dtype is integer types
pass
else:
# if we have different dtypes, possibly promote
new_dtype = find_common_type([this_dtype, other_dtype])
if not is_dtype_equal(this_dtype, new_dtype):
series = series.astype(new_dtype)
if not is_dtype_equal(other_dtype, new_dtype):
otherSeries = otherSeries.astype(new_dtype)
arr = func(series, otherSeries)
arr = maybe_downcast_to_dtype(arr, this_dtype)
result[col] = arr
# convert_objects just in case
return self._constructor(result, index=new_index, columns=new_columns)
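# Illustrative sketch (not part of the pandas source): the dtype handling
# above promotes mismatched column dtypes with ``find_common_type`` before
# applying ``func``, so combining an integer column with a float column
# yields a float result (names and values below are made up).
# >>> import numpy as np
# >>> import pandas as pd
# >>> df_int = pd.DataFrame({"A": [1, 2]})        # int64
# >>> df_float = pd.DataFrame({"A": [0.5, 2.5]})  # float64
# >>> df_int.combine(df_float, np.minimum).dtypes
# A    float64
# dtype: object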
def combine_first(self, other: "DataFrame") -> "DataFrame":
"""
Update null elements with value in the same location in `other`.
Combine two DataFrame objects by filling null values in one DataFrame
with non-null values from other DataFrame. The row and column indexes
of the resulting DataFrame will be the union of the two.
Parameters
----------
other : DataFrame
Provided DataFrame to use to fill null values.
Returns
-------
DataFrame
See Also
--------
DataFrame.combine : Perform series-wise operation on two DataFrames
using a given function.
Examples
--------
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]})
>>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]})
>>> df1.combine_first(df2)
A B
0 1.0 3.0
1 0.0 4.0
Null values still persist if the location of that null value
does not exist in `other`
>>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]})
>>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2])
>>> df1.combine_first(df2)
A B C
0 NaN 4.0 NaN
1 0.0 3.0 1.0
2 NaN 3.0 1.0
"""
import pandas.core.computation.expressions as expressions
def extract_values(arr):
# Does two things:
# 1. maybe gets the values from the Series / Index
# 2. convert datelike to i8
if isinstance(arr, (ABCIndexClass, ABCSeries)):
arr = arr._values
if needs_i8_conversion(arr):
if is_extension_array_dtype(arr.dtype):
arr = arr.asi8
else:
arr = arr.view("i8")
return arr
def combiner(x, y):
mask = isna(x)
if isinstance(mask, (ABCIndexClass, ABCSeries)):
mask = mask._values
x_values = extract_values(x)
y_values = extract_values(y)
# If the column y in other DataFrame is not in first DataFrame,
# just return y_values.
if y.name not in self.columns:
return y_values
return expressions.where(mask, y_values, x_values)
return self.combine(other, combiner, overwrite=False)
def update(
self, other, join="left", overwrite=True, filter_func=None, errors="ignore"
) -> None:
"""
Modify in place using non-NA values from another DataFrame.
Aligns on indices. There is no return value.
Parameters
----------
other : DataFrame, or object coercible into a DataFrame
Should have at least one matching index/column label
with the original DataFrame. If a Series is passed,
its name attribute must be set, and that will be
used as the column name to align with the original DataFrame.
join : {'left'}, default 'left'
Only left join is implemented, keeping the index and columns of the
original object.
overwrite : bool, default True
How to handle non-NA values for overlapping keys:
* True: overwrite original DataFrame's values
with values from `other`.
* False: only update values that are NA in
the original DataFrame.
filter_func : callable(1d-array) -> bool 1d-array, optional
Can choose to replace values other than NA. Return True for values
that should be updated.
errors : {'raise', 'ignore'}, default 'ignore'
If 'raise', will raise a ValueError if the DataFrame and `other`
both contain non-NA data in the same place.
.. versionchanged:: 0.24.0
Changed from `raise_conflict=False|True`
to `errors='ignore'|'raise'`.
Returns
-------
None : method directly changes calling object
Raises
------
ValueError
* When `errors='raise'` and there's overlapping non-NA data.
* When `errors` is not either `'ignore'` or `'raise'`
NotImplementedError
* If `join != 'left'`
See Also
--------
dict.update : Similar method for dictionaries.
DataFrame.merge : For column(s)-on-columns(s) operations.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, 5, 6],
... 'C': [7, 8, 9]})
>>> df.update(new_df)
>>> df
A B
0 1 4
1 2 5
2 3 6
The DataFrame's length does not increase as a result of the update,
only values at matching index/column labels are updated.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e', 'f', 'g', 'h', 'i']})
>>> df.update(new_df)
>>> df
A B
0 a d
1 b e
2 c f
For Series, its name attribute must be set.
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_column = pd.Series(['d', 'e'], name='B', index=[0, 2])
>>> df.update(new_column)
>>> df
A B
0 a d
1 b y
2 c e
>>> df = pd.DataFrame({'A': ['a', 'b', 'c'],
... 'B': ['x', 'y', 'z']})
>>> new_df = pd.DataFrame({'B': ['d', 'e']}, index=[1, 2])
>>> df.update(new_df)
>>> df
A B
0 a x
1 b d
2 c e
If `other` contains NaNs the corresponding values are not updated
in the original dataframe.
>>> df = pd.DataFrame({'A': [1, 2, 3],
... 'B': [400, 500, 600]})
>>> new_df = pd.DataFrame({'B': [4, np.nan, 6]})
>>> df.update(new_df)
>>> df
A B
0 1 4.0
1 2 500.0
2 3 6.0
"""
import pandas.core.computation.expressions as expressions
# TODO: Support other joins
if join != "left": # pragma: no cover
raise NotImplementedError("Only left join is supported")
if errors not in ["ignore", "raise"]:
raise ValueError("The parameter errors must be either 'ignore' or 'raise'")
if not isinstance(other, DataFrame):
other = DataFrame(other)
other = other.reindex_like(self)
for col in self.columns:
this = self[col]._values
that = other[col]._values
if filter_func is not None:
with np.errstate(all="ignore"):
mask = ~filter_func(this) | isna(that)
else:
if errors == "raise":
mask_this = notna(that)
mask_that = notna(this)
if any(mask_this & mask_that):
raise ValueError("Data overlaps.")
if overwrite:
mask = isna(that)
else:
mask = notna(this)
# don't overwrite columns unnecessarily
if mask.all():
continue
self[col] = expressions.where(mask, this, that)
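# Illustrative sketch (not part of the pandas source): with
# ``errors='raise'`` the overlap check above raises as soon as both frames
# hold non-NA data at the same location (example values are made up).
# >>> import numpy as np
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1.0, 2.0]})
# >>> other = pd.DataFrame({"A": [9.0, np.nan]})
# >>> df.update(other, errors="raise")
# Traceback (most recent call last):
# ...
# ValueError: Data overlaps.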
# ----------------------------------------------------------------------
# Data reshaping
@Appender(
"""
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level="Type").mean()
Max Speed
Type
Captive 210.0
Wild 185.0
"""
)
@Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index: bool = True,
sort: bool = True,
group_keys: bool = True,
squeeze: bool = False,
observed: bool = False,
) -> "DataFrameGroupBy":
from pandas.core.groupby.generic import DataFrameGroupBy
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return DataFrameGroupBy(
obj=self,
keys=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
)
_shared_docs[
"pivot"
] = """
Return reshaped DataFrame organized by given index / column values.
Reshape data (produce a "pivot" table) based on column values. Uses
unique values from specified `index` / `columns` to form axes of the
resulting DataFrame. This function does not support data
aggregation, multiple values will result in a MultiIndex in the
columns. See the :ref:`User Guide <reshaping>` for more on reshaping.
Parameters
----------%s
index : str or object, optional
Column to use to make new frame's index. If None, uses
existing index.
columns : str or object
Column to use to make new frame's columns.
values : str, object or a list of the previous, optional
Column(s) to use for populating new frame's values. If not
specified, all remaining columns will be used and the result will
have hierarchically indexed columns.
.. versionchanged:: 0.23.0
Also accept list of column names.
Returns
-------
DataFrame
Returns reshaped DataFrame.
Raises
------
ValueError:
When there are any `index`, `columns` combinations with multiple
values. `DataFrame.pivot_table` when you need to aggregate.
See Also
--------
DataFrame.pivot_table : Generalization of pivot that can handle
duplicate values for one index/column pair.
DataFrame.unstack : Pivot based on the index values instead of a
column.
Notes
-----
For finer-tuned control, see hierarchical indexing documentation along
with the related stack/unstack methods.
Examples
--------
>>> df = pd.DataFrame({'foo': ['one', 'one', 'one', 'two', 'two',
... 'two'],
... 'bar': ['A', 'B', 'C', 'A', 'B', 'C'],
... 'baz': [1, 2, 3, 4, 5, 6],
... 'zoo': ['x', 'y', 'z', 'q', 'w', 't']})
>>> df
foo bar baz zoo
0 one A 1 x
1 one B 2 y
2 one C 3 z
3 two A 4 q
4 two B 5 w
5 two C 6 t
>>> df.pivot(index='foo', columns='bar', values='baz')
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar')['baz']
bar A B C
foo
one 1 2 3
two 4 5 6
>>> df.pivot(index='foo', columns='bar', values=['baz', 'zoo'])
baz zoo
bar A B C A B C
foo
one 1 2 3 x y z
two 4 5 6 q w t
A ValueError is raised if there are any duplicates.
>>> df = pd.DataFrame({"foo": ['one', 'one', 'two', 'two'],
... "bar": ['A', 'A', 'B', 'C'],
... "baz": [1, 2, 3, 4]})
>>> df
foo bar baz
0 one A 1
1 one A 2
2 two B 3
3 two C 4
Notice that the first two rows are the same for our `index`
and `columns` arguments.
>>> df.pivot(index='foo', columns='bar', values='baz')
Traceback (most recent call last):
...
ValueError: Index contains duplicate entries, cannot reshape
"""
@Substitution("")
@Appender(_shared_docs["pivot"])
def pivot(self, index=None, columns=None, values=None) -> "DataFrame":
from pandas.core.reshape.pivot import pivot
return pivot(self, index=index, columns=columns, values=values)
_shared_docs[
"pivot_table"
] = """
Create a spreadsheet-style pivot table as a DataFrame.
The levels in the pivot table will be stored in MultiIndex objects
(hierarchical indexes) on the index and columns of the result DataFrame.
Parameters
----------%s
values : column to aggregate, optional
index : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table index. If an array is passed,
it is used in the same manner as column values.
columns : column, Grouper, array, or list of the previous
If an array is passed, it must be the same length as the data. The
list can contain any of the other types (except list).
Keys to group by on the pivot table column. If an array is passed,
it is used in the same manner as column values.
aggfunc : function, list of functions, dict, default numpy.mean
If list of functions passed, the resulting pivot table will have
hierarchical columns whose top level are the function names
(inferred from the function objects themselves)
If dict is passed, the key is column to aggregate and value
is function or list of functions.
fill_value : scalar, default None
Value to replace missing values with.
margins : bool, default False
Add all row / columns (e.g. for subtotal / grand totals).
dropna : bool, default True
Do not include columns whose entries are all NaN.
margins_name : str, default 'All'
Name of the row / column that will contain the totals
when margins is True.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionchanged:: 0.25.0
Returns
-------
DataFrame
An Excel style pivot table.
See Also
--------
DataFrame.pivot : Pivot without aggregation that can handle
non-numeric data.
Examples
--------
>>> df = pd.DataFrame({"A": ["foo", "foo", "foo", "foo", "foo",
... "bar", "bar", "bar", "bar"],
... "B": ["one", "one", "one", "two", "two",
... "one", "one", "two", "two"],
... "C": ["small", "large", "large", "small",
... "small", "large", "small", "small",
... "large"],
... "D": [1, 2, 2, 3, 3, 4, 5, 6, 7],
... "E": [2, 4, 5, 5, 6, 6, 8, 9, 9]})
>>> df
A B C D E
0 foo one small 1 2
1 foo one large 2 4
2 foo one large 2 5
3 foo two small 3 5
4 foo two small 3 6
5 bar one large 4 6
6 bar one small 5 8
7 bar two small 6 9
8 bar two large 7 9
This first example aggregates values by taking the sum.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum)
>>> table
C large small
A B
bar one 4.0 5.0
two 7.0 6.0
foo one 4.0 1.0
two NaN 6.0
We can also fill missing values using the `fill_value` parameter.
>>> table = pd.pivot_table(df, values='D', index=['A', 'B'],
... columns=['C'], aggfunc=np.sum, fill_value=0)
>>> table
C large small
A B
bar one 4 5
two 7 6
foo one 4 1
two 0 6
The next example aggregates by taking the mean across multiple columns.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': np.mean})
>>> table
D E
A C
bar large 5.500000 7.500000
small 5.500000 8.500000
foo large 2.000000 4.500000
small 2.333333 4.333333
We can also calculate multiple types of aggregations for any given
value column.
>>> table = pd.pivot_table(df, values=['D', 'E'], index=['A', 'C'],
... aggfunc={'D': np.mean,
... 'E': [min, max, np.mean]})
>>> table
D E
mean max mean min
A C
bar large 5.500000 9.0 7.500000 6.0
small 5.500000 9.0 8.500000 8.0
foo large 2.000000 5.0 4.500000 4.0
small 2.333333 6.0 4.333333 2.0
"""
@Substitution("")
@Appender(_shared_docs["pivot_table"])
def pivot_table(
self,
values=None,
index=None,
columns=None,
aggfunc="mean",
fill_value=None,
margins=False,
dropna=True,
margins_name="All",
observed=False,
) -> "DataFrame":
from pandas.core.reshape.pivot import pivot_table
return pivot_table(
self,
values=values,
index=index,
columns=columns,
aggfunc=aggfunc,
fill_value=fill_value,
margins=margins,
dropna=dropna,
margins_name=margins_name,
observed=observed,
)
def stack(self, level=-1, dropna=True):
"""
Stack the prescribed level(s) from columns to index.
Return a reshaped DataFrame or Series having a multi-level
index with one or more new inner-most levels compared to the current
DataFrame. The new inner-most levels are created by pivoting the
columns of the current dataframe:
- if the columns have a single level, the output is a Series;
- if the columns have multiple levels, the new index
level(s) is (are) taken from the prescribed level(s) and
the output is a DataFrame.
The new index levels are sorted.
Parameters
----------
level : int, str, list, default -1
Level(s) to stack from the column axis onto the index
axis, defined as one index or label, or a list of indices
or labels.
dropna : bool, default True
Whether to drop rows in the resulting Frame/Series with
missing values. Stacking a column level onto the index
axis can create combinations of index and column values
that are missing from the original dataframe. See Examples
section.
Returns
-------
DataFrame or Series
Stacked dataframe or series.
See Also
--------
DataFrame.unstack : Unstack prescribed level(s) from index axis
onto column axis.
DataFrame.pivot : Reshape dataframe from long format to wide
format.
DataFrame.pivot_table : Create a spreadsheet-style pivot table
as a DataFrame.
Notes
-----
The function is named by analogy with a collection of books
being reorganized from being side by side on a horizontal
position (the columns of the dataframe) to being stacked
vertically on top of each other (in the index of the
dataframe).
Examples
--------
**Single level columns**
>>> df_single_level_cols = pd.DataFrame([[0, 1], [2, 3]],
... index=['cat', 'dog'],
... columns=['weight', 'height'])
Stacking a dataframe with a single level column axis returns a Series:
>>> df_single_level_cols
weight height
cat 0 1
dog 2 3
>>> df_single_level_cols.stack()
cat weight 0
height 1
dog weight 2
height 3
dtype: int64
**Multi level columns: simple case**
>>> multicol1 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('weight', 'pounds')])
>>> df_multi_level_cols1 = pd.DataFrame([[1, 2], [2, 4]],
... index=['cat', 'dog'],
... columns=multicol1)
Stacking a dataframe with a multi-level column axis:
>>> df_multi_level_cols1
weight
kg pounds
cat 1 2
dog 2 4
>>> df_multi_level_cols1.stack()
weight
cat kg 1
pounds 2
dog kg 2
pounds 4
**Missing values**
>>> multicol2 = pd.MultiIndex.from_tuples([('weight', 'kg'),
... ('height', 'm')])
>>> df_multi_level_cols2 = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]],
... index=['cat', 'dog'],
... columns=multicol2)
It is common to have missing values when stacking a dataframe
with multi-level columns, as the stacked dataframe typically
has more values than the original dataframe. Missing values
are filled with NaNs:
>>> df_multi_level_cols2
weight height
kg m
cat 1.0 2.0
dog 3.0 4.0
>>> df_multi_level_cols2.stack()
height weight
cat kg NaN 1.0
m 2.0 NaN
dog kg NaN 3.0
m 4.0 NaN
**Prescribing the level(s) to be stacked**
The first parameter controls which level or levels are stacked:
>>> df_multi_level_cols2.stack(0)
kg m
cat height NaN 2.0
weight 1.0 NaN
dog height NaN 4.0
weight 3.0 NaN
>>> df_multi_level_cols2.stack([0, 1])
cat height m 2.0
weight kg 1.0
dog height m 4.0
weight kg 3.0
dtype: float64
**Dropping missing values**
>>> df_multi_level_cols3 = pd.DataFrame([[None, 1.0], [2.0, 3.0]],
... index=['cat', 'dog'],
... columns=multicol2)
Note that rows where all values are missing are dropped by
default but this behaviour can be controlled via the dropna
keyword parameter:
>>> df_multi_level_cols3
weight height
kg m
cat NaN 1.0
dog 2.0 3.0
>>> df_multi_level_cols3.stack(dropna=False)
height weight
cat kg NaN NaN
m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
>>> df_multi_level_cols3.stack(dropna=True)
height weight
cat m 1.0 NaN
dog kg NaN 2.0
m 3.0 NaN
"""
from pandas.core.reshape.reshape import stack, stack_multiple
if isinstance(level, (tuple, list)):
return stack_multiple(self, level, dropna=dropna)
else:
return stack(self, level, dropna=dropna)
def explode(self, column: Union[str, Tuple]) -> "DataFrame":
"""
Transform each element of a list-like to a row, replicating index values.
.. versionadded:: 0.25.0
Parameters
----------
column : str or tuple
Column to explode.
Returns
-------
DataFrame
Exploded lists to rows of the subset columns;
index will be duplicated for these rows.
Raises
------
ValueError :
if columns of the frame are not unique.
See Also
--------
DataFrame.unstack : Pivot a level of the (necessarily hierarchical)
index labels.
DataFrame.melt : Unpivot a DataFrame from wide format to long format.
Series.explode : Explode a DataFrame from list-like columns to long format.
Notes
-----
This routine will explode list-likes including lists, tuples,
Series, and np.ndarray. The result dtype of the subset rows will
be object. Scalars will be returned unchanged. Empty list-likes will
result in a np.nan for that row.
Examples
--------
>>> df = pd.DataFrame({'A': [[1, 2, 3], 'foo', [], [3, 4]], 'B': 1})
>>> df
A B
0 [1, 2, 3] 1
1 foo 1
2 [] 1
3 [3, 4] 1
>>> df.explode('A')
A B
0 1 1
0 2 1
0 3 1
1 foo 1
2 NaN 1
3 3 1
3 4 1
"""
if not (is_scalar(column) or isinstance(column, tuple)):
raise ValueError("column must be a scalar")
if not self.columns.is_unique:
raise ValueError("columns must be unique")
df = self.reset_index(drop=True)
# TODO: use overload to refine return type of reset_index
assert df is not None # needed for mypy
result = df[column].explode()
result = df.drop([column], axis=1).join(result)
result.index = self.index.take(result.index)
result = result.reindex(columns=self.columns, copy=False)
return result
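# Illustrative sketch (not part of the pandas source): ``explode`` keeps the
# original index and repeats it via ``self.index.take`` above, so a
# non-default index is replicated for each exploded element (made-up data).
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [[1, 2], [3]], "B": ["x", "y"]},
# ...                   index=["r1", "r2"])
# >>> df.explode("A")
#     A  B
# r1  1  x
# r1  2  x
# r2  3  y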
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels.
Returns a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, str, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name.
fill_value : int, str or dict
Replace NaN with this value if the unstack produces missing values.
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value)
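# Illustrative sketch (not part of the pandas source): ``fill_value``
# replaces the NaNs introduced for index/column combinations that do not
# exist in the original frame (made-up data).
# >>> import pandas as pd
# >>> idx = pd.MultiIndex.from_tuples([("one", "a"), ("one", "b"),
# ...                                  ("two", "a")])
# >>> df = pd.DataFrame({"val": [1, 2, 3]}, index=idx)
# >>> df.unstack(fill_value=0)  # the missing ('two', 'b') cell becomes 0, not NaN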
_shared_docs[
"melt"
] = """
Unpivot a DataFrame from wide to long format, optionally leaving identifiers set.
This function is useful to massage a DataFrame into a format where one
or more columns are identifier variables (`id_vars`), while all other
columns, considered measured variables (`value_vars`), are "unpivoted" to
the row axis, leaving just two non-identifier columns, 'variable' and
'value'.
%(versionadded)s
Parameters
----------
id_vars : tuple, list, or ndarray, optional
Column(s) to use as identifier variables.
value_vars : tuple, list, or ndarray, optional
Column(s) to unpivot. If not specified, uses all columns that
are not set as `id_vars`.
var_name : scalar
Name to use for the 'variable' column. If None it uses
``frame.columns.name`` or 'variable'.
value_name : scalar, default 'value'
Name to use for the 'value' column.
col_level : int or str, optional
If columns are a MultiIndex then use this level to melt.
Returns
-------
DataFrame
Unpivoted DataFrame.
See Also
--------
%(other)s
pivot_table
DataFrame.pivot
Series.explode
Examples
--------
>>> df = pd.DataFrame({'A': {0: 'a', 1: 'b', 2: 'c'},
... 'B': {0: 1, 1: 3, 2: 5},
... 'C': {0: 2, 1: 4, 2: 6}})
>>> df
A B C
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)sid_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=['A'], value_vars=['B', 'C'])
A variable value
0 a B 1
1 b B 3
2 c B 5
3 a C 2
4 b C 4
5 c C 6
The names of 'variable' and 'value' columns can be customized:
>>> %(caller)sid_vars=['A'], value_vars=['B'],
... var_name='myVarname', value_name='myValname')
A myVarname myValname
0 a B 1
1 b B 3
2 c B 5
If you have multi-index columns:
>>> df.columns = [list('ABC'), list('DEF')]
>>> df
A B C
D E F
0 a 1 2
1 b 3 4
2 c 5 6
>>> %(caller)scol_level=0, id_vars=['A'], value_vars=['B'])
A variable value
0 a B 1
1 b B 3
2 c B 5
>>> %(caller)sid_vars=[('A', 'D')], value_vars=[('B', 'E')])
(A, D) variable_0 variable_1 value
0 a B E 1
1 b B E 3
2 c B E 5
"""
@Appender(
_shared_docs["melt"]
% dict(
caller="df.melt(", versionadded=".. versionadded:: 0.20.0\n", other="melt"
)
)
def melt(
self,
id_vars=None,
value_vars=None,
var_name=None,
value_name="value",
col_level=None,
) -> "DataFrame":
from pandas.core.reshape.melt import melt
return melt(
self,
id_vars=id_vars,
value_vars=value_vars,
var_name=var_name,
value_name=value_name,
col_level=col_level,
)
# ----------------------------------------------------------------------
# Time series-related
def diff(self, periods=1, axis=0) -> "DataFrame":
"""
First discrete difference of element.
Calculates the difference of a DataFrame element compared with another
element in the DataFrame (default is the element in the same column
of the previous row).
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference, accepts negative
values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Returns
-------
DataFrame
See Also
--------
Series.diff: First discrete difference for a Series.
DataFrame.pct_change: Percent change over given number of periods.
DataFrame.shift: Shift index by desired number of periods with an
optional time freq.
Examples
--------
Difference with previous row
>>> df = pd.DataFrame({'a': [1, 2, 3, 4, 5, 6],
... 'b': [1, 1, 2, 3, 5, 8],
... 'c': [1, 4, 9, 16, 25, 36]})
>>> df
a b c
0 1 1 1
1 2 1 4
2 3 2 9
3 4 3 16
4 5 5 25
5 6 8 36
>>> df.diff()
a b c
0 NaN NaN NaN
1 1.0 0.0 3.0
2 1.0 1.0 5.0
3 1.0 1.0 7.0
4 1.0 2.0 9.0
5 1.0 3.0 11.0
Difference with previous column
>>> df.diff(axis=1)
a b c
0 NaN 0.0 0.0
1 NaN -1.0 3.0
2 NaN -1.0 7.0
3 NaN -1.0 13.0
4 NaN 0.0 20.0
5 NaN 2.0 28.0
Difference with 3rd previous row
>>> df.diff(periods=3)
a b c
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 3.0 2.0 15.0
4 3.0 4.0 21.0
5 3.0 6.0 27.0
Difference with following row
>>> df.diff(periods=-1)
a b c
0 -1.0 0.0 -3.0
1 -1.0 -1.0 -5.0
2 -1.0 -1.0 -7.0
3 -1.0 -2.0 -9.0
4 -1.0 -3.0 -11.0
5 NaN NaN NaN
"""
bm_axis = self._get_block_manager_axis(axis)
new_data = self._data.diff(n=periods, axis=bm_axis)
return self._constructor(new_data)
# ----------------------------------------------------------------------
# Function application
def _gotitem(
self,
key: Union[str, List[str]],
ndim: int,
subset: Optional[Union[Series, ABCDataFrame]] = None,
) -> Union[Series, ABCDataFrame]:
"""
Sub-classes to define. Return a sliced object.
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if subset is None:
subset = self
elif subset.ndim == 1: # is Series
return subset
# TODO: _shallow_copy(subset)?
return subset[key]
_agg_summary_and_see_also_doc = dedent(
"""
The aggregation operations are always performed over an axis, either the
index (default) or the column axis. This behavior is different from
`numpy` aggregation functions (`mean`, `median`, `prod`, `sum`, `std`,
`var`), where the default is to compute the aggregation of the flattened
array, e.g., ``numpy.mean(arr_2d)`` as opposed to
``numpy.mean(arr_2d, axis=0)``.
`agg` is an alias for `aggregate`. Use the alias.
See Also
--------
DataFrame.apply : Perform any type of operations.
DataFrame.transform : Perform transformation type operations.
core.groupby.GroupBy : Perform operations over groups.
core.resample.Resampler : Perform operations over resampled bins.
core.window.Rolling : Perform operations over rolling window.
core.window.Expanding : Perform operations over expanding window.
core.window.EWM : Perform operation over exponential weighted
window.
"""
)
_agg_examples_doc = dedent(
"""
Examples
--------
>>> df = pd.DataFrame([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9],
... [np.nan, np.nan, np.nan]],
... columns=['A', 'B', 'C'])
Aggregate these functions over the rows.
>>> df.agg(['sum', 'min'])
A B C
sum 12.0 15.0 18.0
min 1.0 2.0 3.0
Different aggregations per column.
>>> df.agg({'A' : ['sum', 'min'], 'B' : ['min', 'max']})
A B
max NaN 8.0
min 1.0 2.0
sum 12.0 NaN
Aggregate over the columns.
>>> df.agg("mean", axis="columns")
0 2.0
1 5.0
2 8.0
3 NaN
dtype: float64
"""
)
@Substitution(
see_also=_agg_summary_and_see_also_doc,
examples=_agg_examples_doc,
versionadded="\n.. versionadded:: 0.20.0\n",
**_shared_doc_kwargs,
)
@Appender(_shared_docs["aggregate"])
def aggregate(self, func, axis=0, *args, **kwargs):
axis = self._get_axis_number(axis)
result = None
try:
result, how = self._aggregate(func, axis=axis, *args, **kwargs)
except TypeError:
pass
if result is None:
return self.apply(func, axis=axis, args=args, **kwargs)
return result
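# Illustrative sketch (not part of the pandas source): when ``_aggregate``
# cannot handle ``func`` (for example a plain callable), the method above
# falls back to ``apply`` (made-up data).
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
# >>> df.agg(lambda x: x.max() - x.min())
# A    2
# B    2
# dtype: int64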
def _aggregate(self, arg, axis=0, *args, **kwargs):
if axis == 1:
# NDFrame.aggregate returns a tuple, and we need to transpose
# only result
result, how = self.T._aggregate(arg, *args, **kwargs)
result = result.T if result is not None else result
return result, how
return super()._aggregate(arg, *args, **kwargs)
agg = aggregate
@Appender(_shared_docs["transform"] % _shared_doc_kwargs)
def transform(self, func, axis=0, *args, **kwargs) -> "DataFrame":
axis = self._get_axis_number(axis)
if axis == 1:
return self.T.transform(func, *args, **kwargs).T
return super().transform(func, *args, **kwargs)
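# Illustrative sketch (not part of the pandas source): for ``axis=1`` the
# method above transposes, transforms column-wise, and transposes back, so
# the function effectively runs per row (made-up data).
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
# >>> df.transform(lambda x: x - x.mean(), axis=1)
#      A    B
# 0 -1.0  1.0
# 1 -1.0  1.0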
def apply(self, func, axis=0, raw=False, result_type=None, args=(), **kwds):
"""
Apply a function along an axis of the DataFrame.
Objects passed to the function are Series objects whose index is
either the DataFrame's index (``axis=0``) or the DataFrame's columns
(``axis=1``). By default (``result_type=None``), the final return type
is inferred from the return type of the applied function. Otherwise,
it depends on the `result_type` argument.
Parameters
----------
func : function
Function to apply to each column or row.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis along which the function is applied:
* 0 or 'index': apply function to each column.
* 1 or 'columns': apply function to each row.
raw : bool, default False
Determines if row or column is passed as a Series or ndarray object:
* ``False`` : passes each row or column as a Series to the
function.
* ``True`` : the passed function will receive ndarray objects
instead.
If you are just applying a NumPy reduction function this will
achieve much better performance.
result_type : {'expand', 'reduce', 'broadcast', None}, default None
These only act when ``axis=1`` (columns):
* 'expand' : list-like results will be turned into columns.
* 'reduce' : returns a Series if possible rather than expanding
list-like results. This is the opposite of 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the DataFrame, the original index and columns will be
retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
args : tuple
Positional arguments to pass to `func` in addition to the
array/series.
**kwds
Additional keyword arguments to pass as keywords arguments to
`func`.
Returns
-------
Series or DataFrame
Result of applying ``func`` along the given axis of the
DataFrame.
See Also
--------
DataFrame.applymap: For elementwise operations.
DataFrame.aggregate: Only perform aggregating type operations.
DataFrame.transform: Only perform transforming type operations.
Examples
--------
>>> df = pd.DataFrame([[4, 9]] * 3, columns=['A', 'B'])
>>> df
A B
0 4 9
1 4 9
2 4 9
Using a numpy universal function (in this case the same as
``np.sqrt(df)``):
>>> df.apply(np.sqrt)
A B
0 2.0 3.0
1 2.0 3.0
2 2.0 3.0
Using a reducing function on either axis
>>> df.apply(np.sum, axis=0)
A 12
B 27
dtype: int64
>>> df.apply(np.sum, axis=1)
0 13
1 13
2 13
dtype: int64
Returning a list-like will result in a Series
>>> df.apply(lambda x: [1, 2], axis=1)
0 [1, 2]
1 [1, 2]
2 [1, 2]
dtype: object
Passing result_type='expand' will expand list-like results
to columns of a Dataframe
>>> df.apply(lambda x: [1, 2], axis=1, result_type='expand')
0 1
0 1 2
1 1 2
2 1 2
Returning a Series inside the function is similar to passing
``result_type='expand'``. The resulting column names
will be the Series index.
>>> df.apply(lambda x: pd.Series([1, 2], index=['foo', 'bar']), axis=1)
foo bar
0 1 2
1 1 2
2 1 2
Passing ``result_type='broadcast'`` will ensure the same shape
result, whether list-like or scalar is returned by the function,
and broadcast it along the axis. The resulting column names will
be the originals.
>>> df.apply(lambda x: [1, 2], axis=1, result_type='broadcast')
A B
0 1 2
1 1 2
2 1 2
"""
from pandas.core.apply import frame_apply
op = frame_apply(
self,
func=func,
axis=axis,
raw=raw,
result_type=result_type,
args=args,
kwds=kwds,
)
return op.get_result()
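# Illustrative sketch (not part of the pandas source): with ``raw=True`` the
# function receives plain ndarrays instead of Series, which can be faster
# for simple NumPy reductions (made-up data).
# >>> import numpy as np
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
# >>> df.apply(np.sum, raw=True)
# A     6
# B    15
# dtype: int64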
def applymap(self, func) -> "DataFrame":
"""
Apply a function to a Dataframe elementwise.
This method applies a function that accepts and returns a scalar
to every element of a DataFrame.
Parameters
----------
func : callable
Python function, returns a single value from a single value.
Returns
-------
DataFrame
Transformed DataFrame.
See Also
--------
DataFrame.apply : Apply a function along input axis of DataFrame.
Notes
-----
In the current implementation applymap calls `func` twice on the
first column/row to decide whether it can take a fast or slow
code path. This can lead to unexpected behavior if `func` has
side-effects, as they will take effect twice for the first
column/row.
Examples
--------
>>> df = pd.DataFrame([[1, 2.12], [3.356, 4.567]])
>>> df
0 1
0 1.000 2.120
1 3.356 4.567
>>> df.applymap(lambda x: len(str(x)))
0 1
0 3 4
1 5 5
Note that a vectorized version of `func` often exists, which will
be much faster. You could square each number elementwise.
>>> df.applymap(lambda x: x**2)
0 1
0 1.000000 4.494400
1 11.262736 20.857489
But it's better to avoid applymap in that case.
>>> df ** 2
0 1
0 1.000000 4.494400
1 11.262736 20.857489
"""
# if we have a dtype == 'M8[ns]', provide boxed values
def infer(x):
if x.empty:
return lib.map_infer(x, func)
return lib.map_infer(x.astype(object).values, func)
return self.apply(infer)
# ----------------------------------------------------------------------
# Merging / joining methods
def append(
self, other, ignore_index=False, verify_integrity=False, sort=False
) -> "DataFrame":
"""
Append rows of `other` to the end of caller, returning a new object.
Columns in `other` that are not in the caller are added as new columns.
Parameters
----------
other : DataFrame or Series/dict-like object, or list of these
The data to append.
ignore_index : bool, default False
If True, do not use the index labels.
verify_integrity : bool, default False
If True, raise ValueError on creating index with duplicates.
sort : bool, default False
Sort columns if the columns of `self` and `other` are not aligned.
.. versionadded:: 0.23.0
.. versionchanged:: 1.0.0
Changed to not sort by default.
Returns
-------
DataFrame
See Also
--------
concat : General function to concatenate DataFrame or Series objects.
Notes
-----
If a list of dict/series is passed and the keys are all contained in
the DataFrame's index, the order of the columns in the resulting
DataFrame will be unchanged.
Iteratively appending rows to a DataFrame can be more computationally
intensive than a single concatenate. A better solution is to append
those rows to a list and then concatenate the list with the original
DataFrame all at once.
Examples
--------
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=list('AB'))
>>> df
A B
0 1 2
1 3 4
>>> df2 = pd.DataFrame([[5, 6], [7, 8]], columns=list('AB'))
>>> df.append(df2)
A B
0 1 2
1 3 4
0 5 6
1 7 8
With `ignore_index` set to True:
>>> df.append(df2, ignore_index=True)
A B
0 1 2
1 3 4
2 5 6
3 7 8
The following, while not recommended ways of generating DataFrames,
show two ways to generate a DataFrame from multiple data sources.
Less efficient:
>>> df = pd.DataFrame(columns=['A'])
>>> for i in range(5):
... df = df.append({'A': i}, ignore_index=True)
>>> df
A
0 0
1 1
2 2
3 3
4 4
More efficient:
>>> pd.concat([pd.DataFrame([i], columns=['A']) for i in range(5)],
... ignore_index=True)
A
0 0
1 1
2 2
3 3
4 4
"""
if isinstance(other, (Series, dict)):
if isinstance(other, dict):
other = Series(other)
if other.name is None and not ignore_index:
raise TypeError(
"Can only append a Series if ignore_index=True "
"or if the Series has a name"
)
index = Index([other.name], name=self.index.name)
idx_diff = other.index.difference(self.columns)
try:
combined_columns = self.columns.append(idx_diff)
except TypeError:
combined_columns = self.columns.astype(object).append(idx_diff)
other = (
other.reindex(combined_columns, copy=False)
.to_frame()
.T.infer_objects()
.rename_axis(index.names, copy=False)
)
if not self.columns.equals(combined_columns):
self = self.reindex(columns=combined_columns)
elif isinstance(other, list):
if not other:
pass
elif not isinstance(other[0], DataFrame):
other = DataFrame(other)
if (self.columns.get_indexer(other.columns) >= 0).all():
other = other.reindex(columns=self.columns)
from pandas.core.reshape.concat import concat
if isinstance(other, (list, tuple)):
to_concat = [self, *other]
else:
to_concat = [self, other]
return concat(
to_concat,
ignore_index=ignore_index,
verify_integrity=verify_integrity,
sort=sort,
)
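# Illustrative sketch (not part of the pandas source): the Series/dict branch
# above requires either a named Series or ``ignore_index=True``; a plain dict
# is first converted to a Series (made-up data).
# >>> import pandas as pd
# >>> df = pd.DataFrame({"A": [1], "B": [2]})
# >>> df.append({"A": 3, "B": 4}, ignore_index=True)
#    A  B
# 0  1  2
# 1  3  4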
def join(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
) -> "DataFrame":
"""
Join columns of another DataFrame.
Join columns with `other` DataFrame either on index or on a key
column. Efficiently join multiple DataFrame objects by index at once by
passing a list.
Parameters
----------
other : DataFrame, Series, or list of DataFrame
Index should be similar to one of the columns in this one. If a
Series is passed, its name attribute must be set, and that will be
used as the column name in the resulting joined DataFrame.
on : str, list of str, or array-like, optional
Column or index level name(s) in the caller to join on the index
in `other`, otherwise joins index-on-index. If multiple
values given, the `other` DataFrame must have a MultiIndex. Can
pass an array as the join key if it is not already contained in
the calling DataFrame. Like an Excel VLOOKUP operation.
how : {'left', 'right', 'outer', 'inner'}, default 'left'
How to handle the operation of the two objects.
* left: use calling frame's index (or column if on is specified)
* right: use `other`'s index.
* outer: form union of calling frame's index (or column if on is
specified) with `other`'s index, and sort it
lexicographically.
* inner: form intersection of calling frame's index (or column if
on is specified) with `other`'s index, preserving the order
of the calling's one.
lsuffix : str, default ''
Suffix to use from left frame's overlapping columns.
rsuffix : str, default ''
Suffix to use from right frame's overlapping columns.
sort : bool, default False
Order result DataFrame lexicographically by the join key. If False,
the order of the join key depends on the join type (how keyword).
Returns
-------
DataFrame
A dataframe containing columns from both the caller and `other`.
See Also
--------
DataFrame.merge : For column(s)-on-columns(s) operations.
Notes
-----
Parameters `on`, `lsuffix`, and `rsuffix` are not supported when
passing a list of `DataFrame` objects.
Support for specifying index levels as the `on` parameter was added
in version 0.23.0.
Examples
--------
>>> df = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3', 'K4', 'K5'],
... 'A': ['A0', 'A1', 'A2', 'A3', 'A4', 'A5']})
>>> df
key A
0 K0 A0
1 K1 A1
2 K2 A2
3 K3 A3
4 K4 A4
5 K5 A5
>>> other = pd.DataFrame({'key': ['K0', 'K1', 'K2'],
... 'B': ['B0', 'B1', 'B2']})
>>> other
key B
0 K0 B0
1 K1 B1
2 K2 B2
Join DataFrames using their indexes.
>>> df.join(other, lsuffix='_caller', rsuffix='_other')
key_caller A key_other B
0 K0 A0 K0 B0
1 K1 A1 K1 B1
2 K2 A2 K2 B2
3 K3 A3 NaN NaN
4 K4 A4 NaN NaN
5 K5 A5 NaN NaN
If we want to join using the key columns, we need to set key to be
the index in both `df` and `other`. The joined DataFrame will have
key as its index.
>>> df.set_index('key').join(other.set_index('key'))
A B
key
K0 A0 B0
K1 A1 B1
K2 A2 B2
K3 A3 NaN
K4 A4 NaN
K5 A5 NaN
Another option to join using the key columns is to use the `on`
parameter. DataFrame.join always uses `other`'s index but we can use
any column in `df`. This method preserves the original DataFrame's
index in the result.
>>> df.join(other.set_index('key'), on='key')
key A B
0 K0 A0 B0
1 K1 A1 B1
2 K2 A2 B2
3 K3 A3 NaN
4 K4 A4 NaN
5 K5 A5 NaN
"""
return self._join_compat(
other, on=on, how=how, lsuffix=lsuffix, rsuffix=rsuffix, sort=sort
)
def _join_compat(
self, other, on=None, how="left", lsuffix="", rsuffix="", sort=False
):
from pandas.core.reshape.merge import merge
from pandas.core.reshape.concat import concat
if isinstance(other, Series):
if other.name is None:
raise ValueError("Other Series must have a name")
other = DataFrame({other.name: other})
if isinstance(other, DataFrame):
return merge(
self,
other,
left_on=on,
how=how,
left_index=on is None,
right_index=True,
suffixes=(lsuffix, rsuffix),
sort=sort,
)
else:
if on is not None:
raise ValueError(
"Joining multiple DataFrames only supported for joining on index"
)
frames = [self] + list(other)
can_concat = all(df.index.is_unique for df in frames)
# join indexes only using concat
if can_concat:
if how == "left":
res = concat(
frames, axis=1, join="outer", verify_integrity=True, sort=sort
)
return res.reindex(self.index, copy=False)
else:
return concat(
frames, axis=1, join=how, verify_integrity=True, sort=sort
)
joined = frames[0]
for frame in frames[1:]:
joined = merge(
joined, frame, how=how, left_index=True, right_index=True
)
return joined
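# Illustrative sketch (not part of the pandas source): when a list of frames
# is passed, the branch above joins on the index only (``on`` is rejected)
# and uses ``concat`` when every index is unique (made-up data).
# >>> import pandas as pd
# >>> left = pd.DataFrame({"A": ["A0", "A1"]}, index=["K0", "K1"])
# >>> mid = pd.DataFrame({"B": ["B0", "B1"]}, index=["K0", "K1"])
# >>> right = pd.DataFrame({"C": ["C0", "C1"]}, index=["K0", "K1"])
# >>> left.join([mid, right])
#      A   B   C
# K0  A0  B0  C0
# K1  A1  B1  C1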
@Substitution("")
@Appender(_merge_doc, indents=2)
def merge(
self,
right,
how="inner",
on=None,
left_on=None,
right_on=None,
left_index=False,
right_index=False,
sort=False,
suffixes=("_x", "_y"),
copy=True,
indicator=False,
validate=None,
) -> "DataFrame":
from pandas.core.reshape.merge import merge
return merge(
self,
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
sort=sort,
suffixes=suffixes,
copy=copy,
indicator=indicator,
validate=validate,
)
def round(self, decimals=0, *args, **kwargs) -> "DataFrame":
"""
Round a DataFrame to a variable number of decimal places.
Parameters
----------
decimals : int, dict, Series
Number of decimal places to round each column to. If an int is
given, round each column to the same number of places.
Otherwise dict and Series round to variable numbers of places.
Column names should be in the keys if `decimals` is a
dict-like, or in the index if `decimals` is a Series. Any
columns not included in `decimals` will be left as is. Elements
of `decimals` which are not columns of the input will be
ignored.
*args
Additional keywords have no effect but might be accepted for
compatibility with numpy.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
DataFrame
A DataFrame with the affected columns rounded to the specified
number of decimal places.
See Also
--------
numpy.around : Round a numpy array to the given number of decimals.
Series.round : Round a Series to the given number of decimals.
Examples
--------
>>> df = pd.DataFrame([(.21, .32), (.01, .67), (.66, .03), (.21, .18)],
... columns=['dogs', 'cats'])
>>> df
dogs cats
0 0.21 0.32
1 0.01 0.67
2 0.66 0.03
3 0.21 0.18
By providing an integer each column is rounded to the same number
of decimal places
>>> df.round(1)
dogs cats
0 0.2 0.3
1 0.0 0.7
2 0.7 0.0
3 0.2 0.2
With a dict, the number of places for specific columns can be
specified with the column names as key and the number of decimal
places as value
>>> df.round({'dogs': 1, 'cats': 0})
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
Using a Series, the number of places for specific columns can be
specified with the column names as index and the number of
decimal places as value
>>> decimals = pd.Series([0, 1], index=['cats', 'dogs'])
>>> df.round(decimals)
dogs cats
0 0.2 0.0
1 0.0 1.0
2 0.7 0.0
3 0.2 0.0
"""
from pandas.core.reshape.concat import concat
def _dict_round(df, decimals):
for col, vals in df.items():
try:
yield _series_round(vals, decimals[col])
except KeyError:
yield vals
def _series_round(s, decimals):
if is_integer_dtype(s) or is_float_dtype(s):
return s.round(decimals)
return s
nv.validate_round(args, kwargs)
if isinstance(decimals, (dict, Series)):
if isinstance(decimals, Series):
if not decimals.index.is_unique:
raise ValueError("Index of decimals must be unique")
new_cols = list(_dict_round(self, decimals))
elif is_integer(decimals):
# Dispatch to Series.round
new_cols = [_series_round(v, decimals) for _, v in self.items()]
else:
raise TypeError("decimals must be an integer, a dict-like or a Series")
if len(new_cols) > 0:
return self._constructor(
concat(new_cols, axis=1), index=self.index, columns=self.columns
)
else:
return self
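# Illustrative sketch (not part of the pandas source): ``_series_round``
# above only rounds integer/float columns, so non-numeric columns pass
# through unchanged (made-up data).
# >>> import pandas as pd
# >>> df = pd.DataFrame({"x": [1.234, 5.678], "label": ["a", "b"]})
# >>> df.round(1)
#      x label
# 0  1.2     a
# 1  5.7     b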
# ----------------------------------------------------------------------
# Statistical methods, etc.
def corr(self, method="pearson", min_periods=1) -> "DataFrame":
"""
Compute pairwise correlation of columns, excluding NA/null values.
Parameters
----------
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float. Note that the returned matrix from corr
will have 1 along the diagonals and will be symmetric
regardless of the callable's behavior.
.. versionadded:: 0.24.0
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result. Currently only available for Pearson
and Spearman correlation.
Returns
-------
DataFrame
Correlation matrix.
See Also
--------
DataFrame.corrwith
Series.corr
Examples
--------
>>> def histogram_intersection(a, b):
... v = np.minimum(a, b).sum().round(decimals=1)
... return v
>>> df = pd.DataFrame([(.2, .3), (.0, .6), (.6, .0), (.2, .1)],
... columns=['dogs', 'cats'])
>>> df.corr(method=histogram_intersection)
dogs cats
dogs 1.0 0.3
cats 0.3 1.0
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if method == "pearson":
correl = libalgos.nancorr(ensure_float64(mat), minp=min_periods)
elif method == "spearman":
correl = libalgos.nancorr_spearman(ensure_float64(mat), minp=min_periods)
elif method == "kendall" or callable(method):
if min_periods is None:
min_periods = 1
mat = ensure_float64(mat).T
corrf = nanops.get_corr_func(method)
K = len(cols)
correl = np.empty((K, K), dtype=float)
mask = np.isfinite(mat)
for i, ac in enumerate(mat):
for j, bc in enumerate(mat):
if i > j:
continue
valid = mask[i] & mask[j]
if valid.sum() < min_periods:
c = np.nan
elif i == j:
c = 1.0
elif not valid.all():
c = corrf(ac[valid], bc[valid])
else:
c = corrf(ac, bc)
correl[i, j] = c
correl[j, i] = c
else:
raise ValueError(
"method must be either 'pearson', "
"'spearman', 'kendall', or a callable, "
f"'{method}' was supplied"
)
return self._constructor(correl, index=idx, columns=cols)
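# Illustrative sketch (not part of the pandas source): ``min_periods`` sets
# the minimum number of jointly non-NA observations per column pair; pairs
# with fewer valid observations come back as NaN (made-up data).
# >>> import numpy as np
# >>> import pandas as pd
# >>> df = pd.DataFrame({"a": [1.0, 2.0, 3.0, 4.0],
# ...                    "b": [4.0, 5.0, np.nan, np.nan]})
# >>> df.corr(min_periods=3)  # the a/b pair has only 2 shared values -> NaN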
def cov(self, min_periods=None) -> "DataFrame":
"""
Compute pairwise covariance of columns, excluding NA/null values.
Compute the pairwise covariance among the series of a DataFrame.
The returned data frame is the `covariance matrix
<https://en.wikipedia.org/wiki/Covariance_matrix>`__ of the columns
of the DataFrame.
Both NA and null values are automatically excluded from the
calculation. (See the note below about bias from missing values.)
A threshold can be set for the minimum number of
observations for each value created. Comparisons with observations
below this threshold will be returned as ``NaN``.
This method is generally used for the analysis of time series data to
understand the relationship between different measures
across time.
Parameters
----------
min_periods : int, optional
Minimum number of observations required per pair of columns
to have a valid result.
Returns
-------
DataFrame
The covariance matrix of the series of the DataFrame.
See Also
--------
Series.cov : Compute covariance with another Series.
core.window.EWM.cov: Exponential weighted sample covariance.
core.window.Expanding.cov : Expanding sample covariance.
core.window.Rolling.cov : Rolling sample covariance.
Notes
-----
Returns the covariance matrix of the DataFrame's time series.
The covariance is normalized by N-1.
For DataFrames that have Series that are missing data (assuming that
data is `missing at random
<https://en.wikipedia.org/wiki/Missing_data#Missing_at_random>`__)
the returned covariance matrix will be an unbiased estimate
of the variance and covariance between the member Series.
However, for many applications this estimate may not be acceptable
because the estimated covariance matrix is not guaranteed to be positive
semi-definite. This could lead to estimated correlations having
absolute values which are greater than one, and/or a non-invertible
covariance matrix. See `Estimation of covariance matrices
<http://en.wikipedia.org/w/index.php?title=Estimation_of_covariance_matrices>`__ for more details.
Examples
--------
>>> df = pd.DataFrame([(1, 2), (0, 3), (2, 0), (1, 1)],
... columns=['dogs', 'cats'])
>>> df.cov()
dogs cats
dogs 0.666667 -1.000000
cats -1.000000 1.666667
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(1000, 5),
... columns=['a', 'b', 'c', 'd', 'e'])
>>> df.cov()
a b c d e
a 0.998438 -0.020161 0.059277 -0.008943 0.014144
b -0.020161 1.059352 -0.008543 -0.024738 0.009826
c 0.059277 -0.008543 1.010670 -0.001486 -0.000271
d -0.008943 -0.024738 -0.001486 0.921297 -0.013692
e 0.014144 0.009826 -0.000271 -0.013692 0.977795
**Minimum number of periods**
This method also supports an optional ``min_periods`` keyword
that specifies the required minimum number of non-NA observations for
each column pair in order to have a valid result:
>>> np.random.seed(42)
>>> df = pd.DataFrame(np.random.randn(20, 3),
... columns=['a', 'b', 'c'])
>>> df.loc[df.index[:5], 'a'] = np.nan
>>> df.loc[df.index[5:10], 'b'] = np.nan
>>> df.cov(min_periods=12)
a b c
a 0.316741 NaN -0.150812
b NaN 1.248003 0.191417
c -0.150812 0.191417 0.895202
"""
numeric_df = self._get_numeric_data()
cols = numeric_df.columns
idx = cols.copy()
mat = numeric_df.values
if notna(mat).all():
if min_periods is not None and min_periods > len(mat):
baseCov = np.empty((mat.shape[1], mat.shape[1]))
baseCov.fill(np.nan)
else:
baseCov = np.cov(mat.T)
baseCov = baseCov.reshape((len(cols), len(cols)))
else:
baseCov = libalgos.nancorr(ensure_float64(mat), cov=True, minp=min_periods)
return self._constructor(baseCov, index=idx, columns=cols)
def corrwith(self, other, axis=0, drop=False, method="pearson") -> Series:
"""
Compute pairwise correlation.
Pairwise correlation is computed between rows or columns of
DataFrame with rows or columns of Series or DataFrame. DataFrames
are first aligned along both axes before computing the
correlations.
Parameters
----------
other : DataFrame, Series
Object with which to compute correlations.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' to compute column-wise, 1 or 'columns' for
row-wise.
drop : bool, default False
Drop missing indices from result.
method : {'pearson', 'kendall', 'spearman'} or callable
Method of correlation:
* pearson : standard correlation coefficient
* kendall : Kendall Tau correlation coefficient
* spearman : Spearman rank correlation
* callable: callable with input two 1d ndarrays
and returning a float.
.. versionadded:: 0.24.0
Returns
-------
Series
Pairwise correlations.
See Also
--------
DataFrame.corr
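Examples
--------
A minimal sketch (hypothetical frames; with perfectly linear columns the
Pearson correlation is exactly 1.0):
>>> index = ["a", "b", "c", "d", "e"]
>>> columns = ["one", "two", "three", "four"]
>>> df1 = pd.DataFrame(np.arange(20).reshape(5, 4),
...                    index=index, columns=columns)
>>> df2 = pd.DataFrame(np.arange(16).reshape(4, 4),
...                    index=index[:4], columns=columns)
>>> df1.corrwith(df2)
one      1.0
two      1.0
three    1.0
four     1.0
dtype: float64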
"""
axis = self._get_axis_number(axis)
this = self._get_numeric_data()
if isinstance(other, Series):
return this.apply(lambda x: other.corr(x, method=method), axis=axis)
other = other._get_numeric_data()
left, right = this.align(other, join="inner", copy=False)
if axis == 1:
left = left.T
right = right.T
if method == "pearson":
# mask missing values
left = left + right * 0
right = right + left * 0
# demeaned data
ldem = left - left.mean()
rdem = right - right.mean()
num = (ldem * rdem).sum()
dom = (left.count() - 1) * left.std() * right.std()
correl = num / dom
elif method in ["kendall", "spearman"] or callable(method):
def c(x):
return nanops.nancorr(x[0], x[1], method=method)
correl = Series(
map(c, zip(left.values.T, right.values.T)), index=left.columns
)
else:
raise ValueError(
f"Invalid method {method} was passed, "
"valid methods are: 'pearson', 'kendall', "
"'spearman', or callable"
)
if not drop:
# Find non-matching labels along the given axis
# and append missing correlations (GH 22375)
raxis = 1 if axis == 0 else 0
result_index = this._get_axis(raxis).union(other._get_axis(raxis))
idx_diff = result_index.difference(correl.index)
if len(idx_diff) > 0:
correl = correl.append(Series([np.nan] * len(idx_diff), index=idx_diff))
return correl
# ----------------------------------------------------------------------
# ndarray-like stats methods
def count(self, axis=0, level=None, numeric_only=False):
"""
Count non-NA cells for each column or row.
The values `None`, `NaN`, `NaT`, and optionally `numpy.inf` (depending
on `pandas.options.mode.use_inf_as_na`) are considered NA.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
If 0 or 'index' counts are generated for each column.
If 1 or 'columns' counts are generated for each **row**.
level : int or str, optional
If the axis is a `MultiIndex` (hierarchical), count along a
particular `level`, collapsing into a `DataFrame`.
A `str` specifies the level name.
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Returns
-------
Series or DataFrame
For each column/row the number of non-NA/null entries.
If `level` is specified returns a `DataFrame`.
See Also
--------
Series.count: Number of non-NA elements in a Series.
DataFrame.shape: Number of DataFrame rows and columns (including NA
elements).
DataFrame.isna: Boolean same-sized DataFrame showing places of NA
elements.
Examples
--------
Constructing DataFrame from a dictionary:
>>> df = pd.DataFrame({"Person":
... ["John", "Myla", "Lewis", "John", "Myla"],
... "Age": [24., np.nan, 21., 33, 26],
... "Single": [False, True, True, True, False]})
>>> df
Person Age Single
0 John 24.0 False
1 Myla NaN True
2 Lewis 21.0 True
3 John 33.0 True
4 Myla 26.0 False
Notice the uncounted NA values:
>>> df.count()
Person 5
Age 4
Single 5
dtype: int64
Counts for each **row**:
>>> df.count(axis='columns')
0 3
1 2
2 3
3 3
4 3
dtype: int64
Counts for one level of a `MultiIndex`:
>>> df.set_index(["Person", "Single"]).count(level="Person")
Age
Person
John 2
Lewis 1
Myla 1
"""
axis = self._get_axis_number(axis)
if level is not None:
return self._count_level(level, axis=axis, numeric_only=numeric_only)
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
# GH #423
if len(frame._get_axis(axis)) == 0:
result = Series(0, index=frame._get_agg_axis(axis))
else:
if frame._is_mixed_type or frame._data.any_extension_types:
# the or any_extension_types is really only hit for single-
# column frames with an extension array
result = notna(frame).sum(axis=axis)
else:
# GH13407
series_counts = notna(frame).sum(axis=axis)
counts = series_counts.values
result = Series(counts, index=frame._get_agg_axis(axis))
return result.astype("int64")
def _count_level(self, level, axis=0, numeric_only=False):
if numeric_only:
frame = self._get_numeric_data()
else:
frame = self
count_axis = frame._get_axis(axis)
agg_axis = frame._get_agg_axis(axis)
if not isinstance(count_axis, ABCMultiIndex):
raise TypeError(
f"Can only count levels on hierarchical {self._get_axis_name(axis)}."
)
if frame._is_mixed_type:
# Since we have mixed types, calling notna(frame.values) might
# upcast everything to object
mask = notna(frame).values
else:
# But use the speedup when we have homogeneous dtypes
mask = notna(frame.values)
if axis == 1:
# We're transposing the mask rather than frame to avoid potential
# upcasts to object, which induces a ~20x slowdown
mask = mask.T
if isinstance(level, str):
level = count_axis._get_level_number(level)
level_name = count_axis._names[level]
level_index = count_axis.levels[level]._shallow_copy(name=level_name)
level_codes = ensure_int64(count_axis.codes[level])
counts = lib.count_level_2d(mask, level_codes, len(level_index), axis=0)
result = DataFrame(counts, index=level_index, columns=agg_axis)
if axis == 1:
# Undo our earlier transpose
return result.T
else:
return result
def _reduce(
self, op, name, axis=0, skipna=True, numeric_only=None, filter_type=None, **kwds
):
if axis is None and filter_type == "bool":
labels = None
constructor = None
else:
# TODO: Make other agg func handle axis=None properly
axis = self._get_axis_number(axis)
labels = self._get_agg_axis(axis)
constructor = self._constructor
def f(x):
return op(x, axis=axis, skipna=skipna, **kwds)
def _get_data(axis_matters):
if filter_type is None or filter_type == "numeric":
data = self._get_numeric_data()
elif filter_type == "bool":
if axis_matters:
# GH#25101, GH#24434
data = self._get_bool_data() if axis == 0 else self
else:
data = self._get_bool_data()
else: # pragma: no cover
msg = (
f"Generating numeric_only data with filter_type {filter_type} "
"not supported."
)
raise NotImplementedError(msg)
return data
if numeric_only is not None and axis in [0, 1]:
df = self
if numeric_only is True:
df = _get_data(axis_matters=True)
if axis == 1:
df = df.T
axis = 0
out_dtype = "bool" if filter_type == "bool" else None
# After possibly _get_data and transposing, we are now in the
# simple case where we can use BlockManager._reduce
res = df._data.reduce(op, axis=1, skipna=skipna, **kwds)
assert isinstance(res, dict)
if len(res):
assert len(res) == max(list(res.keys())) + 1, res.keys()
out = df._constructor_sliced(res, index=range(len(res)), dtype=out_dtype)
out.index = df.columns
return out
if numeric_only is None:
values = self.values
try:
result = f(values)
if filter_type == "bool" and is_object_dtype(values) and axis is None:
# work around https://github.com/numpy/numpy/issues/10489
# TODO: combine with hasattr(result, 'dtype') further down
# hard since we don't have `values` down there.
result = np.bool_(result)
except TypeError:
# e.g. in nanops trying to convert strs to float
# try by-column first
if filter_type is None and axis == 0:
# this can end up with a non-reduction, but not always: if the
# types are mixed with datelike values we need to make sure the
# result comes back as a Series. We only end up here when
# numeric_only was not specified and the whole-frame reduction
# failed on the mixed types, so fall back to a column-by-column
# reduction and do what we can.
from pandas.core.apply import frame_apply
opa = frame_apply(
self, func=f, result_type="expand", ignore_failures=True
)
result = opa.get_result()
if result.ndim == self.ndim:
result = result.iloc[0]
return result
# TODO: why doesn't axis matter here?
data = _get_data(axis_matters=False)
with np.errstate(all="ignore"):
result = f(data.values)
labels = data._get_agg_axis(axis)
else:
if numeric_only:
data = _get_data(axis_matters=True)
values = data.values
labels = data._get_agg_axis(axis)
else:
values = self.values
result = f(values)
if hasattr(result, "dtype") and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == "numeric":
result = result.astype(np.float64)
elif filter_type == "bool" and notna(result).all():
result = result.astype(np.bool_)
except (ValueError, TypeError):
# try to coerce to the original dtypes item by item if we can
if axis == 0:
result = coerce_to_dtypes(result, self.dtypes)
if constructor is not None:
result = Series(result, index=labels)
return result
def nunique(self, axis=0, dropna=True) -> Series:
"""
Count distinct observations over requested axis.
Return Series with number of distinct observations. Can ignore NaN
values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for
column-wise.
dropna : bool, default True
Don't include NaN in the counts.
Returns
-------
Series
See Also
--------
Series.nunique: Method nunique for Series.
DataFrame.count: Count non-NA cells for each column or row.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [1, 1, 1]})
>>> df.nunique()
A 3
B 1
dtype: int64
>>> df.nunique(axis=1)
0 1
1 2
2 2
dtype: int64
"""
return self.apply(Series.nunique, axis=axis, dropna=dropna)
def idxmin(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of minimum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of minima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmin
Notes
-----
This method is the DataFrame version of ``ndarray.argmin``.
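Examples
--------
An illustrative sketch (hypothetical frame, not taken from the original
docstring):
>>> df = pd.DataFrame({'A': [4, 2, 3], 'B': [3, 6, 0]})
>>> df.idxmin()
A    1
B    2
dtype: int64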
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmin(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def idxmax(self, axis=0, skipna=True) -> Series:
"""
Return index of first occurrence of maximum over requested axis.
NA/null values are excluded.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to use. 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
Returns
-------
Series
Indexes of maxima along the specified axis.
Raises
------
ValueError
* If the row/column is empty
See Also
--------
Series.idxmax
Notes
-----
This method is the DataFrame version of ``ndarray.argmax``.
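Examples
--------
An illustrative sketch (hypothetical frame, not taken from the original
docstring):
>>> df = pd.DataFrame({'A': [4, 2, 3], 'B': [3, 6, 0]})
>>> df.idxmax()
A    0
B    1
dtype: int64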
"""
axis = self._get_axis_number(axis)
indices = nanops.nanargmax(self.values, axis=axis, skipna=skipna)
index = self._get_axis(axis)
result = [index[i] if i >= 0 else np.nan for i in indices]
return Series(result, index=self._get_agg_axis(axis))
def _get_agg_axis(self, axis_num):
"""
Let's be explicit about this: return the labels that aggregation results are indexed by (columns for axis 0, index for axis 1).
"""
if axis_num == 0:
return self.columns
elif axis_num == 1:
return self.index
else:
raise ValueError(f"Axis must be 0 or 1 (got {repr(axis_num)})")
def mode(self, axis=0, numeric_only=False, dropna=True) -> "DataFrame":
"""
Get the mode(s) of each element along the selected axis.
The mode of a set of values is the value that appears most often.
It can be multiple values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to iterate over while searching for the mode:
* 0 or 'index' : get mode of each column
* 1 or 'columns' : get mode of each row.
numeric_only : bool, default False
If True, only apply to numeric columns.
dropna : bool, default True
Don't consider counts of NaN/NaT.
.. versionadded:: 0.24.0
Returns
-------
DataFrame
The modes of each column or row.
See Also
--------
Series.mode : Return the highest frequency value in a Series.
Series.value_counts : Return the counts of values in a Series.
Examples
--------
>>> df = pd.DataFrame([('bird', 2, 2),
... ('mammal', 4, np.nan),
... ('arthropod', 8, 0),
... ('bird', 2, np.nan)],
... index=('falcon', 'horse', 'spider', 'ostrich'),
... columns=('species', 'legs', 'wings'))
>>> df
species legs wings
falcon bird 2 2.0
horse mammal 4 NaN
spider arthropod 8 0.0
ostrich bird 2 NaN
By default, missing values are not considered, and the modes of wings
are both 0 and 2. The second row of species and legs contains ``NaN``,
because they have only one mode, but the DataFrame has two rows.
>>> df.mode()
species legs wings
0 bird 2.0 0.0
1 NaN NaN 2.0
With ``dropna=False``, ``NaN`` values are considered and they can be
the mode (like for wings).
>>> df.mode(dropna=False)
species legs wings
0 bird 2 NaN
Setting ``numeric_only=True``, only the mode of numeric columns is
computed, and columns of other types are ignored.
>>> df.mode(numeric_only=True)
legs wings
0 2.0 0.0
1 NaN 2.0
To compute the mode over columns and not rows, use the axis parameter:
>>> df.mode(axis='columns', numeric_only=True)
0 1
falcon 2.0 NaN
horse 4.0 NaN
spider 0.0 8.0
ostrich 2.0 NaN
"""
data = self if not numeric_only else self._get_numeric_data()
def f(s):
return s.mode(dropna=dropna)
return data.apply(f, axis=axis)
def quantile(self, q=0.5, axis=0, numeric_only=True, interpolation="linear"):
"""
Return values at the given quantile over requested axis.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
Value between 0 <= q <= 1, the quantile(s) to compute.
axis : {0, 1, 'index', 'columns'} (default 0)
Equals 0 or 'index' for row-wise, 1 or 'columns' for column-wise.
numeric_only : bool, default True
If False, the quantile of datetime and timedelta data will be
computed as well.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
Series or DataFrame
If ``q`` is an array, a DataFrame will be returned where the
index is ``q``, the columns are the columns of self, and the
values are the quantiles.
If ``q`` is a float, a Series will be returned where the
index is the columns of self and the values are the quantiles.
See Also
--------
core.window.Rolling.quantile: Rolling quantile.
numpy.percentile: Numpy function to compute the percentile.
Examples
--------
>>> df = pd.DataFrame(np.array([[1, 1], [2, 10], [3, 100], [4, 100]]),
... columns=['a', 'b'])
>>> df.quantile(.1)
a 1.3
b 3.7
Name: 0.1, dtype: float64
>>> df.quantile([.1, .5])
a b
0.1 1.3 3.7
0.5 2.5 55.0
Specifying `numeric_only=False` will also compute the quantile of
datetime and timedelta data.
>>> df = pd.DataFrame({'A': [1, 2],
... 'B': [pd.Timestamp('2010'),
... pd.Timestamp('2011')],
... 'C': [pd.Timedelta('1 days'),
... pd.Timedelta('2 days')]})
>>> df.quantile(0.5, numeric_only=False)
A 1.5
B 2010-07-02 12:00:00
C 1 days 12:00:00
Name: 0.5, dtype: object
"""
validate_percentile(q)
data = self._get_numeric_data() if numeric_only else self
axis = self._get_axis_number(axis)
is_transposed = axis == 1
if is_transposed:
data = data.T
if len(data.columns) == 0:
# GH#23925 _get_numeric_data may have dropped all columns
cols = Index([], name=self.columns.name)
if is_list_like(q):
return self._constructor([], index=q, columns=cols)
return self._constructor_sliced([], index=cols, name=q, dtype=np.float64)
result = data._data.quantile(
qs=q, axis=1, interpolation=interpolation, transposed=is_transposed
)
if result.ndim == 2:
result = self._constructor(result)
else:
result = self._constructor_sliced(result, name=q)
if is_transposed:
result = result.T
return result
def to_timestamp(self, freq=None, how="start", axis=0, copy=True) -> "DataFrame":
"""
Cast to DatetimeIndex of timestamps, at *beginning* of period.
Parameters
----------
freq : str, default frequency of PeriodIndex
Desired frequency.
how : {'s', 'e', 'start', 'end'}
Convention for converting period to timestamp; start of period
vs. end.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
DataFrame with DatetimeIndex
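Examples
--------
A small sketch (hypothetical data; only the resulting index dtype is shown
to keep the output version-independent):
>>> idx = pd.period_range('2023-01', periods=3, freq='M')
>>> df = pd.DataFrame({'sales': [10, 11, 12]}, index=idx)
>>> df.to_timestamp().index.dtype
dtype('<M8[ns]')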
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_timestamp(freq=freq, how=how))
elif axis == 1:
new_data.set_axis(0, self.columns.to_timestamp(freq=freq, how=how))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
def to_period(self, freq=None, axis=0, copy=True) -> "DataFrame":
"""
Convert DataFrame from DatetimeIndex to PeriodIndex.
Convert DataFrame from DatetimeIndex to PeriodIndex with desired
frequency (inferred from index if not passed).
Parameters
----------
freq : str, optional
Frequency of the PeriodIndex (inferred from the index if not passed).
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to convert (the index by default).
copy : bool, default True
If False then underlying input data is not copied.
Returns
-------
TimeSeries with PeriodIndex
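Examples
--------
A small sketch (hypothetical data; the daily frequency is inferred from the
DatetimeIndex):
>>> idx = pd.date_range('2023-01-01', periods=3, freq='D')
>>> df = pd.DataFrame({'sales': [10, 11, 12]}, index=idx)
>>> df.to_period().index.freqstr
'D'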
"""
new_data = self._data
if copy:
new_data = new_data.copy()
axis = self._get_axis_number(axis)
if axis == 0:
new_data.set_axis(1, self.index.to_period(freq=freq))
elif axis == 1:
new_data.set_axis(0, self.columns.to_period(freq=freq))
else: # pragma: no cover
raise AssertionError(f"Axis must be 0 or 1. Got {axis}")
return self._constructor(new_data)
def isin(self, values) -> "DataFrame":
"""
Whether each element in the DataFrame is contained in values.
Parameters
----------
values : iterable, Series, DataFrame or dict
The result will only be true at a location if all the
labels match. If `values` is a Series, that's the index. If
`values` is a dict, the keys must be the column names,
which must match. If `values` is a DataFrame,
then both the index and column labels must match.
Returns
-------
DataFrame
DataFrame of booleans showing whether each element in the DataFrame
is contained in values.
See Also
--------
DataFrame.eq: Equality test for DataFrame.
Series.isin: Equivalent method on Series.
Series.str.contains: Test if pattern or regex is contained within a
string of a Series or Index.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4], 'num_wings': [2, 0]},
... index=['falcon', 'dog'])
>>> df
num_legs num_wings
falcon 2 2
dog 4 0
When ``values`` is a list, check whether every value in the DataFrame
is present in the list (which animals have 0 or 2 legs or wings)
>>> df.isin([0, 2])
num_legs num_wings
falcon True True
dog False True
When ``values`` is a dict, we can pass values to check for each
column separately:
>>> df.isin({'num_wings': [0, 3]})
num_legs num_wings
falcon False False
dog False True
When ``values`` is a Series or DataFrame the index and column must
match. Note that 'falcon' does not match based on the number of legs
in df2.
>>> other = pd.DataFrame({'num_legs': [8, 2], 'num_wings': [0, 2]},
... index=['spider', 'falcon'])
>>> df.isin(other)
num_legs num_wings
falcon True True
dog False False
"""
if isinstance(values, dict):
from pandas.core.reshape.concat import concat
values = collections.defaultdict(list, values)
return self._ensure_type(
concat(
(
self.iloc[:, [i]].isin(values[col])
for i, col in enumerate(self.columns)
),
axis=1,
)
)
elif isinstance(values, Series):
if not values.index.is_unique:
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self), axis="index")
elif isinstance(values, DataFrame):
if not (values.columns.is_unique and values.index.is_unique):
raise ValueError("cannot compute isin with a duplicate axis.")
return self.eq(values.reindex_like(self))
else:
if not is_list_like(values):
raise TypeError(
"only list-like or dict-like objects are allowed "
"to be passed to DataFrame.isin(), "
f"you passed a {repr(type(values).__name__)}"
)
return DataFrame(
algorithms.isin(self.values.ravel(), values).reshape(self.shape),
self.index,
self.columns,
)
# ----------------------------------------------------------------------
# Add plotting methods to DataFrame
plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
hist = pandas.plotting.hist_frame
boxplot = pandas.plotting.boxplot_frame
sparse = CachedAccessor("sparse", SparseFrameAccessor)
DataFrame._setup_axes(
["index", "columns"],
docs={
"index": "The index (row labels) of the DataFrame.",
"columns": "The column labels of the DataFrame.",
},
)
DataFrame._add_numeric_operations()
DataFrame._add_series_or_dataframe_operations()
ops.add_flex_arithmetic_methods(DataFrame)
ops.add_special_arithmetic_methods(DataFrame)
def _from_nested_dict(data):
# TODO: this should be seriously cythonized
new_data = {}
for index, s in data.items():
for col, v in s.items():
new_data[col] = new_data.get(col, {})
new_data[col][index] = v
return new_data
def _put_str(s, space):
return str(s)[:space].ljust(space)
|
py | 1a49392f16b44e67cbacb72a2e4bdad3849fa182 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals, print_function
import frappe
import unittest, json, sys, os
import xmlrunner
import importlib
from frappe.modules import load_doctype_module, get_module_name
from frappe.utils import cstr
import frappe.utils.scheduler
import cProfile, pstats
from six import StringIO
from six.moves import reload_module
from frappe.model.naming import revert_series_if_last
unittest_runner = unittest.TextTestRunner
def xmlrunner_wrapper(output):
"""Convenience wrapper to keep method signature unchanged for XMLTestRunner and TextTestRunner"""
def _runner(*args, **kwargs):
kwargs['output'] = output
return xmlrunner.XMLTestRunner(*args, **kwargs)
return _runner
def main(app=None, module=None, doctype=None, verbose=False, tests=(),
force=False, profile=False, junit_xml_output=None, ui_tests=False,
doctype_list_path=None, skip_test_records=False, failfast=False):
global unittest_runner
if doctype_list_path:
app, doctype_list_path = doctype_list_path.split(os.path.sep, 1)
with open(frappe.get_app_path(app, doctype_list_path), 'r') as f:
doctype = f.read().strip().splitlines()
xmloutput_fh = None
if junit_xml_output:
xmloutput_fh = open(junit_xml_output, 'wb')
unittest_runner = xmlrunner_wrapper(xmloutput_fh)
else:
unittest_runner = unittest.TextTestRunner
try:
frappe.flags.print_messages = verbose
frappe.flags.in_test = True
if not frappe.db:
frappe.connect()
# if not frappe.conf.get("db_name").startswith("test_"):
# raise Exception, 'db_name must start with "test_"'
# workaround! since there is no separate test db
frappe.clear_cache()
frappe.utils.scheduler.disable_scheduler()
set_test_email_config()
if not frappe.flags.skip_before_tests:
if verbose:
print('Running "before_tests" hooks')
for fn in frappe.get_hooks("before_tests", app_name=app):
frappe.get_attr(fn)()
if doctype:
ret = run_tests_for_doctype(doctype, verbose, tests, force, profile)
elif module:
ret = run_tests_for_module(module, verbose, tests, profile)
else:
ret = run_all_tests(app, verbose, profile, ui_tests, failfast=failfast)
frappe.db.commit()
# workaround! since there is no separate test db
frappe.clear_cache()
return ret
finally:
if xmloutput_fh:
xmloutput_fh.flush()
xmloutput_fh.close()
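# Illustrative (hypothetical) direct invocation of ``main``; in practice this
# module is normally driven by the bench CLI rather than imported and called
# like this:
#     main(app="frappe", doctype="ToDo", verbose=True)
#     main(app="frappe", module="frappe.tests.test_api", failfast=True)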
def set_test_email_config():
frappe.conf.update({
"auto_email_id": "[email protected]",
"mail_server": "smtp.example.com",
"mail_login": "[email protected]",
"mail_password": "test",
"admin_password": "admin"
})
def run_all_tests(app=None, verbose=False, profile=False, ui_tests=False, failfast=False):
import os
apps = [app] if app else frappe.get_installed_apps()
test_suite = unittest.TestSuite()
for app in apps:
for path, folders, files in os.walk(frappe.get_pymodule_path(app)):
for dontwalk in ('locals', '.git', 'public'):
if dontwalk in folders:
folders.remove(dontwalk)
# print path
for filename in files:
filename = cstr(filename)
if filename.startswith("test_") and filename.endswith(".py")\
and filename != 'test_runner.py':
# print filename[:-3]
_add_test(app, path, filename, verbose,
test_suite, ui_tests)
runner = unittest_runner(verbosity=1+(verbose and 1 or 0), failfast=failfast)
if profile:
pr = cProfile.Profile()
pr.enable()
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def run_tests_for_doctype(doctypes, verbose=False, tests=(), force=False, profile=False):
modules = []
if not isinstance(doctypes, (list, tuple)):
doctypes = [doctypes]
for doctype in doctypes:
module = frappe.db.get_value("DocType", doctype, "module")
if not module:
print('Invalid doctype {0}'.format(doctype))
sys.exit(1)
test_module = get_module_name(doctype, module, "test_")
if force:
for name in frappe.db.sql_list("select name from `tab%s`" % doctype):
frappe.delete_doc(doctype, name, force=True)
make_test_records(doctype, verbose=verbose, force=force)
modules.append(importlib.import_module(test_module))
return _run_unittest(modules, verbose=verbose, tests=tests, profile=profile)
def run_tests_for_module(module, verbose=False, tests=(), profile=False):
module = importlib.import_module(module)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
return _run_unittest(module, verbose=verbose, tests=tests, profile=profile)
def run_setup_wizard_ui_test(app=None, verbose=False, profile=False):
'''Run setup wizard UI test using test_test_runner'''
frappe.flags.run_setup_wizard_ui_test = 1
return run_ui_tests(app=app, test=None, verbose=verbose, profile=profile)
def run_ui_tests(app=None, test=None, test_list=None, verbose=False, profile=False):
'''Run a single unit test for UI using test_test_runner'''
module = importlib.import_module('frappe.tests.ui.test_test_runner')
frappe.flags.ui_test_app = app
if test_list:
frappe.flags.ui_test_list = test_list
else:
frappe.flags.ui_test_path = test
return _run_unittest(module, verbose=verbose, tests=(), profile=profile)
def _run_unittest(modules, verbose=False, tests=(), profile=False):
test_suite = unittest.TestSuite()
if not isinstance(modules, (list, tuple)):
modules = [modules]
for module in modules:
module_test_cases = unittest.TestLoader().loadTestsFromModule(module)
if tests:
for each in module_test_cases:
for test_case in each.__dict__["_tests"]:
if test_case.__dict__["_testMethodName"] in tests:
test_suite.addTest(test_case)
else:
test_suite.addTest(module_test_cases)
runner = unittest_runner(verbosity=1+(verbose and 1 or 0))
if profile:
pr = cProfile.Profile()
pr.enable()
frappe.flags.tests_verbose = verbose
out = runner.run(test_suite)
if profile:
pr.disable()
s = StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats()
print(s.getvalue())
return out
def _add_test(app, path, filename, verbose, test_suite=None, ui_tests=False):
import os
if os.path.sep.join(["doctype", "doctype", "boilerplate"]) in path:
# in /doctype/doctype/boilerplate/
return
app_path = frappe.get_pymodule_path(app)
relative_path = os.path.relpath(path, app_path)
if relative_path=='.':
module_name = app
else:
module_name = '{app}.{relative_path}.{module_name}'.format(app=app,
relative_path=relative_path.replace('/', '.'), module_name=filename[:-3])
module = importlib.import_module(module_name)
if hasattr(module, "test_dependencies"):
for doctype in module.test_dependencies:
make_test_records(doctype, verbose=verbose)
is_ui_test = hasattr(module, 'TestDriver')
if is_ui_test != ui_tests:
return
if not test_suite:
test_suite = unittest.TestSuite()
if os.path.basename(os.path.dirname(path))=="doctype":
txt_file = os.path.join(path, filename[5:].replace(".py", ".json"))
with open(txt_file, 'r') as f:
doc = json.loads(f.read())
doctype = doc["name"]
make_test_records(doctype, verbose)
test_suite.addTest(unittest.TestLoader().loadTestsFromModule(module))
def make_test_records(doctype, verbose=0, force=False):
if not frappe.db:
frappe.connect()
if frappe.flags.skip_test_records:
return
for options in get_dependencies(doctype):
if options == "[Select]":
continue
if options not in frappe.local.test_objects:
frappe.local.test_objects[options] = []
make_test_records(options, verbose, force)
make_test_records_for_doctype(options, verbose, force)
def get_modules(doctype):
module = frappe.db.get_value("DocType", doctype, "module")
try:
test_module = load_doctype_module(doctype, module, "test_")
if test_module:
reload_module(test_module)
except ImportError:
test_module = None
return module, test_module
def get_dependencies(doctype):
module, test_module = get_modules(doctype)
meta = frappe.get_meta(doctype)
link_fields = meta.get_link_fields()
for df in meta.get_table_fields():
link_fields.extend(frappe.get_meta(df.options).get_link_fields())
options_list = [df.options for df in link_fields] + [doctype]
if hasattr(test_module, "test_dependencies"):
options_list += test_module.test_dependencies
options_list = list(set(options_list))
if hasattr(test_module, "test_ignore"):
for doctype_name in test_module.test_ignore:
if doctype_name in options_list:
options_list.remove(doctype_name)
return options_list
def make_test_records_for_doctype(doctype, verbose=0, force=False):
if not force and doctype in get_test_record_log():
return
module, test_module = get_modules(doctype)
if verbose:
print("Making for " + doctype)
if hasattr(test_module, "_make_test_records"):
frappe.local.test_objects[doctype] += test_module._make_test_records(verbose)
elif hasattr(test_module, "test_records"):
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_module.test_records, verbose, force)
else:
test_records = frappe.get_test_records(doctype)
if test_records:
frappe.local.test_objects[doctype] += make_test_objects(doctype, test_records, verbose, force)
elif verbose:
print_mandatory_fields(doctype)
add_to_test_record_log(doctype)
def make_test_objects(doctype, test_records=None, verbose=None, reset=False):
'''Make test objects from given list of `test_records` or from `test_records.json`'''
records = []
def revert_naming(d):
if getattr(d, 'naming_series', None):
revert_series_if_last(d.naming_series, d.name)
if test_records is None:
test_records = frappe.get_test_records(doctype)
for doc in test_records:
if not doc.get("doctype"):
doc["doctype"] = doctype
d = frappe.copy_doc(doc)
if d.meta.get_field("naming_series"):
if not d.naming_series:
d.naming_series = "_T-" + d.doctype + "-"
if doc.get('name'):
d.name = doc.get('name')
else:
d.set_new_name()
if frappe.db.exists(d.doctype, d.name) and not reset:
frappe.db.rollback()
# do not create test records, if already exists
continue
# submit if docstatus is set to 1 for test record
docstatus = d.docstatus
d.docstatus = 0
try:
d.run_method("before_test_insert")
d.insert()
if docstatus == 1:
d.submit()
except frappe.NameError:
revert_naming(d)
except Exception as e:
if d.flags.ignore_these_exceptions_in_test and e.__class__ in d.flags.ignore_these_exceptions_in_test:
revert_naming(d)
else:
raise
records.append(d.name)
frappe.db.commit()
return records
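# Rough usage sketch (hypothetical record data; real test records usually come
# from a doctype's test_records.json or its test module):
#     make_test_objects("ToDo", test_records=[{"description": "test entry"}])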
def print_mandatory_fields(doctype):
print("Please setup make_test_records for: " + doctype)
print("-" * 60)
meta = frappe.get_meta(doctype)
print("Autoname: " + (meta.autoname or ""))
print("Mandatory Fields: ")
for d in meta.get("fields", {"reqd":1}):
print(d.parent + ":" + d.fieldname + " | " + d.fieldtype + " | " + (d.options or ""))
print()
def add_to_test_record_log(doctype):
'''Add `doctype` to site/.test_log
`.test_log` is a cache of all doctypes for which test records are created'''
test_record_log = get_test_record_log()
if doctype not in test_record_log:
frappe.flags.test_record_log.append(doctype)
with open(frappe.get_site_path('.test_log'), 'w') as f:
f.write('\n'.join(filter(None, frappe.flags.test_record_log)))
def get_test_record_log():
'''Return the list of doctypes for which test records have been created'''
if 'test_record_log' not in frappe.flags:
if os.path.exists(frappe.get_site_path('.test_log')):
with open(frappe.get_site_path('.test_log'), 'r') as f:
frappe.flags.test_record_log = f.read().splitlines()
else:
frappe.flags.test_record_log = []
return frappe.flags.test_record_log |
py | 1a49396f4d566aa184622ed0f413c55033dba4c7 | from __future__ import print_function
import os
from unittest import SkipTest
import yaml
from parameterized import parameterized
from commcare_cloud.environment.main import Environment
from commcare_cloud.environment.paths import DefaultPaths
from nose.tools import assert_equal
TEST_ENVIRONMENTS_DIR = os.path.join(os.path.dirname(__file__), 'test_envs')
TEST_ENVIRONMENTS = os.listdir(TEST_ENVIRONMENTS_DIR)
@parameterized(TEST_ENVIRONMENTS)
def test_postgresql_config(env_name):
env = Environment(DefaultPaths(env_name, environments_dir=TEST_ENVIRONMENTS_DIR))
if not os.path.exists(env.paths.generated_yml):
raise SkipTest
with open(env.paths.generated_yml) as f:
generated = yaml.safe_load(f)
assert list(generated.keys()) == ['postgresql_dbs']
expected_json = generated['postgresql_dbs']
actual_json = env.postgresql_config.to_generated_variables(env)['postgresql_dbs']
assert_equal(actual_json, expected_json)
|
py | 1a493a4a3d22be95b0653c575af3ea2b735de282 | """Functions for converting between color spaces.
The "central" color space in this module is RGB, more specifically the linear
sRGB color space using D65 as a white-point [1]_. This represents a
standard monitor (w/o gamma correction). For a good FAQ on color spaces see
[2]_.
The API consists of functions to convert to and from RGB as defined above, as
well as a generic function to convert to and from any supported color space
(which is done through RGB in most cases).
Supported color spaces
----------------------
* RGB : Red Green Blue.
Here the sRGB standard [1]_.
* HSV : Hue, Saturation, Value.
Uniquely defined when related to sRGB [3]_.
* RGB CIE : Red Green Blue.
The original RGB CIE standard from 1931 [4]_. Primary colors are 700 nm
(red), 546.1 nm (green) and 435.8 nm (blue).
* XYZ CIE : XYZ
Derived from the RGB CIE color space. Chosen such that
``x == y == z == 1/3`` at the whitepoint, and all color matching
functions are greater than zero everywhere.
* LAB CIE : Lightness, a, b
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LUV CIE : Lightness, u, v
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LCH CIE : Lightness, Chroma, Hue
Defined in terms of LAB CIE. C and H are the polar representation of
a and b. The polar angle H is defined to be on ``(0, 2*pi)``
:author: Nicolas Pinto (rgb2hsv)
:author: Ralf Gommers (hsv2rgb)
:author: Travis Oliphant (XYZ and RGB CIE functions)
:author: Matt Terry (lab2lch)
:author: Alex Izvorski (yuv2rgb, rgb2yuv and related)
:license: modified BSD
References
----------
.. [1] Official specification of sRGB, IEC 61966-2-1:1999.
.. [2] http://www.poynton.com/ColorFAQ.html
.. [3] https://en.wikipedia.org/wiki/HSL_and_HSV
.. [4] https://en.wikipedia.org/wiki/CIE_1931_color_space
"""
from warnings import warn
import numpy as np
from scipy import linalg
from ..util import dtype, dtype_limits
def guess_spatial_dimensions(image):
"""Make an educated guess about whether an image has a channels dimension.
Parameters
----------
image : ndarray
The input image.
Returns
-------
spatial_dims : int or None
The number of spatial dimensions of `image`. If ambiguous, the value
is ``None``.
Raises
------
ValueError
If the image array has less than two or more than four dimensions.
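Examples
--------
A quick sketch (synthetic arrays; a trailing dimension of size 3 is
ambiguous, so ``None`` is returned for it):
>>> import numpy as np
>>> guess_spatial_dimensions(np.zeros((10, 10)))
2
>>> guess_spatial_dimensions(np.zeros((10, 10, 10)))
3
>>> guess_spatial_dimensions(np.zeros((10, 10, 3))) is None
True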
"""
if image.ndim == 2:
return 2
if image.ndim == 3 and image.shape[-1] != 3:
return 3
if image.ndim == 3 and image.shape[-1] == 3:
return None
if image.ndim == 4 and image.shape[-1] == 3:
return 3
else:
raise ValueError("Expected 2D, 3D, or 4D array, got %iD." % image.ndim)
def convert_colorspace(arr, fromspace, tospace):
"""Convert an image array to a new color space.
Valid color spaces are:
'RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr', 'YDbDr'
Parameters
----------
arr : array_like
The image to convert.
fromspace : valid color space
The color space to convert from. Can be specified in lower case.
tospace : valid color space
The color space to convert to. Can be specified in lower case.
Returns
-------
out : ndarray
The converted image.
Notes
-----
Conversion is performed through the "central" RGB color space,
i.e. conversion from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV``
instead of directly.
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = convert_colorspace(img, 'RGB', 'HSV')
"""
fromdict = {'rgb': lambda im: im, 'hsv': hsv2rgb, 'rgb cie': rgbcie2rgb,
'xyz': xyz2rgb, 'yuv': yuv2rgb, 'yiq': yiq2rgb,
'ypbpr': ypbpr2rgb, 'ycbcr': ycbcr2rgb, 'ydbdr': ydbdr2rgb}
todict = {'rgb': lambda im: im, 'hsv': rgb2hsv, 'rgb cie': rgb2rgbcie,
'xyz': rgb2xyz, 'yuv': rgb2yuv, 'yiq': rgb2yiq,
'ypbpr': rgb2ypbpr, 'ycbcr': rgb2ycbcr, 'ydbdr': rgb2ydbdr}
fromspace = fromspace.lower()
tospace = tospace.lower()
if fromspace not in fromdict:
msg = '`fromspace` has to be one of {}'.format(fromdict.keys())
raise ValueError(msg)
if tospace not in todict:
msg = '`tospace` has to be one of {}'.format(todict.keys())
raise ValueError(msg)
return todict[tospace](fromdict[fromspace](arr))
def _prepare_colorarray(arr):
"""Check the shape of the array and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 3:
msg = ("the input array must be have a shape == (.., ..,[ ..,] 3)), " +
"got (" + (", ".join(map(str, arr.shape))) + ")")
raise ValueError(msg)
return dtype.img_as_float(arr)
def _prepare_rgba_array(arr):
"""Check the shape of the array to be RGBA and convert it to
floating point representation.
"""
arr = np.asanyarray(arr)
if arr.ndim not in [3, 4] or arr.shape[-1] != 4:
msg = ("the input array must have a shape == (.., ..,[ ..,] 4)), "
"got {0}".format(arr.shape))
raise ValueError(msg)
return dtype.img_as_float(arr)
def rgba2rgb(rgba, background=(1, 1, 1)):
"""RGBA to RGB conversion.
Parameters
----------
rgba : array_like
The image in RGBA format, in a 3-D array of shape ``(.., .., 4)``.
background : array_like
The color of the background to blend the image with. A tuple
containing 3 floats between 0 to 1 - the RGB value of the background.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgba` is not a 3-D array of shape ``(.., .., 4)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
Examples
--------
>>> from skimage import color
>>> from skimage import data
>>> img_rgba = data.logo()
>>> img_rgb = color.rgba2rgb(img_rgba)
"""
arr = _prepare_rgba_array(rgba)
if isinstance(background, tuple) and len(background) != 3:
raise ValueError('the background must be a tuple with 3 items - the '
'RGB color of the background. Got {0} items.'
.format(len(background)))
alpha = arr[..., -1]
channels = arr[..., :-1]
out = np.empty_like(channels)
for ichan in range(channels.shape[-1]):
out[..., ichan] = np.clip(
(1 - alpha) * background[ichan] + alpha * channels[..., ichan],
a_min=0, a_max=1)
return out
def rgb2hsv(rgb):
"""RGB to HSV color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import color
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = color.rgb2hsv(img)
"""
arr = _prepare_colorarray(rgb)
out = np.empty_like(arr)
# -- V channel
out_v = arr.max(-1)
# -- S channel
delta = arr.ptp(-1)
# Ignore warning for zero divided by zero
old_settings = np.seterr(invalid='ignore')
out_s = delta / out_v
out_s[delta == 0.] = 0.
# -- H channel
# red is max
idx = (arr[:, :, 0] == out_v)
out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
# green is max
idx = (arr[:, :, 1] == out_v)
out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
# blue is max
idx = (arr[:, :, 2] == out_v)
out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
out_h = (out[:, :, 0] / 6.) % 1.
out_h[delta == 0.] = 0.
np.seterr(**old_settings)
# -- output
out[:, :, 0] = out_h
out[:, :, 1] = out_s
out[:, :, 2] = out_v
# remove NaN
out[np.isnan(out)] = 0
return out
def hsv2rgb(hsv):
"""HSV to RGB color space conversion.
Parameters
----------
hsv : array_like
The image in HSV format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hsv` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_hsv = rgb2hsv(img)
>>> img_rgb = hsv2rgb(img_hsv)
"""
arr = _prepare_colorarray(hsv)
hi = np.floor(arr[:, :, 0] * 6)
f = arr[:, :, 0] * 6 - hi
p = arr[:, :, 2] * (1 - arr[:, :, 1])
q = arr[:, :, 2] * (1 - f * arr[:, :, 1])
t = arr[:, :, 2] * (1 - (1 - f) * arr[:, :, 1])
v = arr[:, :, 2]
hi = np.dstack([hi, hi, hi]).astype(np.uint8) % 6
out = np.choose(hi, [np.dstack((v, t, p)),
np.dstack((q, v, p)),
np.dstack((p, v, t)),
np.dstack((p, q, v)),
np.dstack((t, p, v)),
np.dstack((v, p, q))])
return out
# ---------------------------------------------------------------
# Primaries for the coordinate systems
# ---------------------------------------------------------------
cie_primaries = np.array([700, 546.1, 435.8])
sb_primaries = np.array([1. / 155, 1. / 190, 1. / 225]) * 1e5
# ---------------------------------------------------------------
# Matrices that define conversion between different color spaces
# ---------------------------------------------------------------
# From sRGB specification
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From https://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
[0.17697, 0.81240, 0.01063],
[0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = rgbcie_from_xyz @ xyz_from_rgb
rgb_from_rgbcie = rgb_from_xyz @ xyz_from_rgbcie
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
[0, 0, 0],
[0, 0, 0]])
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.14714119, -0.28886916, 0.43601035 ],
[ 0.61497538, -0.51496512, -0.10001026 ]])
rgb_from_yuv = linalg.inv(yuv_from_rgb)
yiq_from_rgb = np.array([[0.299 , 0.587 , 0.114 ],
[0.59590059, -0.27455667, -0.32134392],
[0.21153661, -0.52273617, 0.31119955]])
rgb_from_yiq = linalg.inv(yiq_from_rgb)
ypbpr_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ],
[-0.168736,-0.331264, 0.5 ],
[ 0.5 ,-0.418688,-0.081312]])
rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)
ycbcr_from_rgb = np.array([[ 65.481, 128.553, 24.966],
[ -37.797, -74.203, 112.0 ],
[ 112.0 , -93.786, -18.214]])
rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)
ydbdr_from_rgb = np.array([[ 0.299, 0.587, 0.114],
[ -0.45 , -0.883, 1.333],
[ -1.333, 1.116, 0.217]])
rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb)
# CIE LAB constants for Observer=2A, Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1., 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
# illuminant[I][0] corresponds to the XYZ coordinates for the 2 degree
# field of view.
#
# illuminant[I][1] corresponds to the XYZ coordinates for the 10 degree
# field of view.
#
# The XYZ coordinates are calculated from [1], using the formula:
#
# X = x * ( Y / y )
# Y = Y
# Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
illuminants = \
{"A": {'2': (1.098466069456375, 1, 0.3558228003436005),
'10': (1.111420406956693, 1, 0.3519978321919493)},
"D50": {'2': (0.9642119944211994, 1, 0.8251882845188288),
'10': (0.9672062750333777, 1, 0.8142801513128616)},
"D55": {'2': (0.956797052643698, 1, 0.9214805860173273),
'10': (0.9579665682254781, 1, 0.9092525159847462)},
"D65": {'2': (0.95047, 1., 1.08883), # This was: `lab_ref_white`
'10': (0.94809667673716, 1, 1.0730513595166162)},
"D75": {'2': (0.9497220898840717, 1, 1.226393520724154),
'10': (0.9441713925645873, 1, 1.2064272211720228)},
"E": {'2': (1.0, 1.0, 1.0),
'10': (1.0, 1.0, 1.0)}}
def get_xyz_coords(illuminant, observer):
"""Get the XYZ coordinates of the given illuminant and observer [1]_.
Parameters
----------
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
(x, y, z) : tuple
A tuple with 3 elements containing the XYZ coordinates of the given
illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
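Examples
--------
A brief sketch (the D65/2 values below simply echo the module-level
``lab_ref_white`` constant defined above):
>>> get_xyz_coords("D65", "2")
(0.95047, 1.0, 1.08883)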
"""
illuminant = illuminant.upper()
try:
return illuminants[illuminant][observer]
except KeyError:
raise ValueError("Unknown illuminant/observer combination\
(\'{0}\', \'{1}\')".format(illuminant, observer))
# Haematoxylin-Eosin-DAB colorspace
# From original Ruifrok's paper: A. C. Ruifrok and D. A. Johnston,
# "Quantification of histochemical staining by color deconvolution.,"
# Analytical and quantitative cytology and histology / the International
# Academy of Cytology [and] American Society of Cytology, vol. 23, no. 4,
# pp. 291-9, Aug. 2001.
rgb_from_hed = np.array([[0.65, 0.70, 0.29],
[0.07, 0.99, 0.11],
[0.27, 0.57, 0.78]])
hed_from_rgb = linalg.inv(rgb_from_hed)
# Following matrices are adapted form the Java code written by G.Landini.
# The original code is available at:
# http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
# Hematoxylin + DAB
rgb_from_hdx = np.array([[0.650, 0.704, 0.286],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_hdx[2, :] = np.cross(rgb_from_hdx[0, :], rgb_from_hdx[1, :])
hdx_from_rgb = linalg.inv(rgb_from_hdx)
# Feulgen + Light Green
rgb_from_fgx = np.array([[0.46420921, 0.83008335, 0.30827187],
[0.94705542, 0.25373821, 0.19650764],
[0.0, 0.0, 0.0]])
rgb_from_fgx[2, :] = np.cross(rgb_from_fgx[0, :], rgb_from_fgx[1, :])
fgx_from_rgb = linalg.inv(rgb_from_fgx)
# Giemsa: Methyl Blue + Eosin
rgb_from_bex = np.array([[0.834750233, 0.513556283, 0.196330403],
[0.092789, 0.954111, 0.283111],
[0.0, 0.0, 0.0]])
rgb_from_bex[2, :] = np.cross(rgb_from_bex[0, :], rgb_from_bex[1, :])
bex_from_rgb = linalg.inv(rgb_from_bex)
# FastRed + FastBlue + DAB
rgb_from_rbd = np.array([[0.21393921, 0.85112669, 0.47794022],
[0.74890292, 0.60624161, 0.26731082],
[0.268, 0.570, 0.776]])
rbd_from_rgb = linalg.inv(rgb_from_rbd)
# Methyl Green + DAB
rgb_from_gdx = np.array([[0.98003, 0.144316, 0.133146],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_gdx[2, :] = np.cross(rgb_from_gdx[0, :], rgb_from_gdx[1, :])
gdx_from_rgb = linalg.inv(rgb_from_gdx)
# Hematoxylin + AEC
rgb_from_hax = np.array([[0.650, 0.704, 0.286],
[0.2743, 0.6796, 0.6803],
[0.0, 0.0, 0.0]])
rgb_from_hax[2, :] = np.cross(rgb_from_hax[0, :], rgb_from_hax[1, :])
hax_from_rgb = linalg.inv(rgb_from_hax)
# Blue matrix Anilline Blue + Red matrix Azocarmine + Orange matrix Orange-G
rgb_from_bro = np.array([[0.853033, 0.508733, 0.112656],
[0.09289875, 0.8662008, 0.49098468],
[0.10732849, 0.36765403, 0.9237484]])
bro_from_rgb = linalg.inv(rgb_from_bro)
# Methyl Blue + Ponceau Fuchsin
rgb_from_bpx = np.array([[0.7995107, 0.5913521, 0.10528667],
[0.09997159, 0.73738605, 0.6680326],
[0.0, 0.0, 0.0]])
rgb_from_bpx[2, :] = np.cross(rgb_from_bpx[0, :], rgb_from_bpx[1, :])
bpx_from_rgb = linalg.inv(rgb_from_bpx)
# Alcian Blue + Hematoxylin
rgb_from_ahx = np.array([[0.874622, 0.457711, 0.158256],
[0.552556, 0.7544, 0.353744],
[0.0, 0.0, 0.0]])
rgb_from_ahx[2, :] = np.cross(rgb_from_ahx[0, :], rgb_from_ahx[1, :])
ahx_from_rgb = linalg.inv(rgb_from_ahx)
# Hematoxylin + PAS
rgb_from_hpx = np.array([[0.644211, 0.716556, 0.266844],
[0.175411, 0.972178, 0.154589],
[0.0, 0.0, 0.0]])
rgb_from_hpx[2, :] = np.cross(rgb_from_hpx[0, :], rgb_from_hpx[1, :])
hpx_from_rgb = linalg.inv(rgb_from_hpx)
# -------------------------------------------------------------
# The conversion functions that make use of the matrices above
# -------------------------------------------------------------
def _convert(matrix, arr):
"""Do the color space conversion.
Parameters
----------
matrix : array_like
The 3x3 matrix to use.
arr : array_like
The input array.
Returns
-------
out : ndarray, dtype=float
The converted array.
"""
arr = _prepare_colorarray(arr)
return arr @ matrix.T.copy()
def xyz2rgb(xyz):
"""XYZ to RGB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts to sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2rgb
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_rgb = xyz2rgb(img_xyz)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _convert(rgb_from_xyz, xyz)
mask = arr > 0.0031308
arr[mask] = 1.055 * np.power(arr[mask], 1 / 2.4) - 0.055
arr[~mask] *= 12.92
np.clip(arr, 0, 1, out=arr)
return arr
def rgb2xyz(rgb):
"""RGB to XYZ color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Returns
-------
out : ndarray
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts from sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _prepare_colorarray(rgb).copy()
mask = arr > 0.04045
arr[mask] = np.power((arr[mask] + 0.055) / 1.055, 2.4)
arr[~mask] /= 12.92
return _convert(xyz_from_rgb, arr)
def rgb2rgbcie(rgb):
"""RGB to RGB CIE color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie
>>> img = data.astronaut()
>>> img_rgbcie = rgb2rgbcie(img)
"""
return _convert(rgbcie_from_rgb, rgb)
def rgbcie2rgb(rgbcie):
"""RGB CIE to RGB color space conversion.
Parameters
----------
rgbcie : array_like
The image in RGB CIE format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgbcie` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2rgbcie, rgbcie2rgb
>>> img = data.astronaut()
>>> img_rgbcie = rgb2rgbcie(img)
>>> img_rgb = rgbcie2rgb(img_rgbcie)
"""
return _convert(rgb_from_rgbcie, rgbcie)
def rgb2gray(rgb):
"""Compute luminance of an RGB image.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D or 4-D array of shape
``(.., ..,[ ..,] 3)``, or in RGBA format with shape
``(.., ..,[ ..,] 4)``.
Returns
-------
out : ndarray
The luminance image - an array which is the same size as the input
array, but with the channel dimension removed.
Raises
------
ValueError
If `rgb2gray` is not a 3-D or 4-D arrays of shape
``(.., ..,[ ..,] 3)`` or ``(.., ..,[ ..,] 4)``.
References
----------
.. [1] http://www.poynton.com/PDFs/ColorFAQ.pdf
Notes
-----
The weights used in this conversion are calibrated for contemporary
CRT phosphors::
Y = 0.2125 R + 0.7154 G + 0.0721 B
If there is an alpha channel present, it is ignored.
Examples
--------
>>> from skimage.color import rgb2gray
>>> from skimage import data
>>> img = data.astronaut()
>>> img_gray = rgb2gray(img)
"""
if rgb.ndim == 2:
return np.ascontiguousarray(rgb)
rgb = _prepare_colorarray(rgb[..., :3])
coeffs = np.array([0.2125, 0.7154, 0.0721], dtype=rgb.dtype)
return rgb @ coeffs
rgb2grey = rgb2gray
def gray2rgb(image, alpha=None):
"""Create an RGB representation of a gray-level image.
Parameters
----------
image : array_like
Input image of shape ``(M[, N][, P])``.
alpha : bool, optional
Ensure that the output image has an alpha layer. If None,
alpha layers are passed through but not created.
Returns
-------
rgb : ndarray
RGB image of shape ``(M[, N][, P], 3)``.
Raises
------
ValueError
If the input is not a 1-, 2- or 3-dimensional image.
Notes
-----
If the input is a 1-dimensional image of shape ``(M, )``, the output
will be shape ``(M, 3)``.
"""
is_rgb = False
is_alpha = False
dims = np.squeeze(image).ndim
if dims == 3:
if image.shape[2] == 3:
is_rgb = True
elif image.shape[2] == 4:
is_alpha = True
is_rgb = True
if is_rgb:
if alpha is False:
image = image[..., :3]
elif alpha is True and is_alpha is False:
alpha_layer = (np.ones_like(image[..., 0, np.newaxis]) *
dtype_limits(image, clip_negative=False)[1])
image = np.concatenate((image, alpha_layer), axis=2)
return image
elif dims in (1, 2, 3):
image = image[..., np.newaxis]
if alpha:
alpha_layer = (np.ones_like(image) * dtype_limits(image, clip_negative=False)[1])
return np.concatenate(3 * (image,) + (alpha_layer,), axis=-1)
else:
return np.concatenate(3 * (image,), axis=-1)
else:
raise ValueError("Input image expected to be RGB, RGBA or gray.")
grey2rgb = gray2rgb
def xyz2lab(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-LAB color space conversion.
Parameters
----------
xyz : array_like
The image in XYZ format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in CIE-LAB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `xyz` is not a 3-D array of shape ``(.., ..,[ ..,] 3)``.
ValueError
If either the illuminant or the observer angle is unsupported or
unknown.
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2lab
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_lab = xyz2lab(img_xyz)
"""
arr = _prepare_colorarray(xyz)
xyz_ref_white = get_xyz_coords(illuminant, observer)
# scale by CIE XYZ tristimulus values of the reference white point
arr = arr / xyz_ref_white
# Nonlinear distortion and linear transformation
mask = arr > 0.008856
arr[mask] = np.cbrt(arr[mask])
arr[~mask] = 7.787 * arr[~mask] + 16. / 116.
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
# Vector scaling
L = (116. * y) - 16.
a = 500.0 * (x - y)
b = 200.0 * (y - z)
return np.concatenate([x[..., np.newaxis] for x in [L, a, b]], axis=-1)
def lab2xyz(lab, illuminant="D65", observer="2"):
"""CIE-LAB to XYZcolor space conversion.
Parameters
----------
lab : array_like
The image in lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in XYZ format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
UserWarning
If any of the pixels are invalid (Z < 0).
Notes
-----
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values x_ref
= 95.047, y_ref = 100., z_ref = 108.883. See function 'get_xyz_coords' for
a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=07#text7
.. [2] https://en.wikipedia.org/wiki/Lab_color_space
"""
arr = _prepare_colorarray(lab).copy()
L, a, b = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
y = (L + 16.) / 116.
x = (a / 500.) + y
z = y - (b / 200.)
if np.any(z < 0):
invalid = np.nonzero(z < 0)
warn('Color data out of range: Z < 0 in %s pixels' % invalid[0].size)
z[invalid] = 0
out = np.dstack([x, y, z])
mask = out > 0.2068966
out[mask] = np.power(out[mask], 3.)
out[~mask] = (out[~mask] - 16.0 / 116.) / 7.787
# rescale to the reference white (illuminant)
xyz_ref_white = get_xyz_coords(illuminant, observer)
out *= xyz_ref_white
return out
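# Hedged round-trip sketch (added for illustration, not part of the original
# module): xyz2lab followed by lab2xyz should approximately recover the input
# for valid XYZ values. `xyz` is assumed to be an (M, N, 3) float array.
def _example_lab_roundtrip(xyz, illuminant="D65", observer="2"):
    lab = xyz2lab(xyz, illuminant, observer)
    return lab2xyz(lab, illuminant, observer)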
def rgb2lab(rgb, illuminant="D65", observer="2"):
"""RGB to lab color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in Lab format, in a 3- or 4-D array of shape
``(.., ..,[ ..,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(.., ..,[ ..,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses rgb2xyz and xyz2lab.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2lab(rgb2xyz(rgb), illuminant, observer)
def lab2rgb(lab, illuminant="D65", observer="2"):
"""Lab to RGB color space conversion.
Parameters
----------
lab : array_like
The image in Lab format, in a 3-D array of shape ``(.., .., 3)``.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `lab` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
Notes
-----
This function uses lab2xyz and xyz2rgb.
By default Observer= 2A, Illuminant= D65. CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function `get_xyz_coords` for
a list of supported illuminants.
"""
return xyz2rgb(lab2xyz(lab, illuminant, observer))
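# Usage sketch (added for illustration, not part of the original module):
# convert an sRGB image to CIE-LAB and back, mirroring the docstring examples
# of the other conversion functions.
def _example_rgb_lab_roundtrip(rgb):
    lab = rgb2lab(rgb)
    return lab2rgb(lab)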
def xyz2luv(xyz, illuminant="D65", observer="2"):
"""XYZ to CIE-Luv color space conversion.
Parameters
----------
xyz : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in XYZ format. Final dimension denotes
channels.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE-Luv format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
By default XYZ conversion weights use observer=2A. Reference whitepoint
for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,
108.883)``. See function 'get_xyz_coords' for a list of supported
illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] https://en.wikipedia.org/wiki/CIELUV
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2xyz, xyz2luv
>>> img = data.astronaut()
>>> img_xyz = rgb2xyz(img)
>>> img_luv = xyz2luv(img_xyz)
"""
arr = _prepare_colorarray(xyz)
# extract channels
x, y, z = arr[..., 0], arr[..., 1], arr[..., 2]
eps = np.finfo(np.float).eps
# compute y_r and L
xyz_ref_white = np.array(get_xyz_coords(illuminant, observer))
L = y / xyz_ref_white[1]
mask = L > 0.008856
L[mask] = 116. * np.cbrt(L[mask]) - 16.
L[~mask] = 903.3 * L[~mask]
u0 = 4 * xyz_ref_white[0] / ([1, 15, 3] @ xyz_ref_white)
v0 = 9 * xyz_ref_white[1] / ([1, 15, 3] @ xyz_ref_white)
# u' and v' helper functions
def fu(X, Y, Z):
return (4. * X) / (X + 15. * Y + 3. * Z + eps)
def fv(X, Y, Z):
return (9. * Y) / (X + 15. * Y + 3. * Z + eps)
# compute u and v using helper functions
u = 13. * L * (fu(x, y, z) - u0)
v = 13. * L * (fv(x, y, z) - v0)
return np.concatenate([q[..., np.newaxis] for q in [L, u, v]], axis=-1)
def luv2xyz(luv, illuminant="D65", observer="2"):
"""CIE-Luv to XYZ color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE-Luv format. Final dimension denotes
channels.
illuminant : {"A", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10"}, optional
The aperture angle of the observer.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
XYZ conversion weights use observer=2A. Reference whitepoint for D65
Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``. See
function 'get_xyz_coords' for a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] https://en.wikipedia.org/wiki/CIELUV
"""
arr = _prepare_colorarray(luv).copy()
L, u, v = arr[:, :, 0], arr[:, :, 1], arr[:, :, 2]
eps = np.finfo(np.float).eps
# compute y
y = L.copy()
mask = y > 7.999625
y[mask] = np.power((y[mask] + 16.) / 116., 3.)
y[~mask] = y[~mask] / 903.3
xyz_ref_white = get_xyz_coords(illuminant, observer)
y *= xyz_ref_white[1]
# reference white x,z
uv_weights = np.array([1, 15, 3])
u0 = 4 * xyz_ref_white[0] / (uv_weights @ xyz_ref_white)
v0 = 9 * xyz_ref_white[1] / (uv_weights @ xyz_ref_white)
# compute intermediate values
a = u0 + u / (13. * L + eps)
b = v0 + v / (13. * L + eps)
c = 3 * y * (5 * b - 3)
# compute x and z
z = ((a - 4) * c - 15 * a * b * y) / (12 * b)
x = -(c / b + 3. * z)
return np.concatenate([q[..., np.newaxis] for q in [x, y, z]], axis=-1)
def rgb2luv(rgb):
"""RGB to CIE-Luv color space conversion.
Parameters
----------
rgb : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in RGB format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in CIE Luv format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses rgb2xyz and xyz2luv.
References
----------
.. [1] http://www.easyrgb.com/index.php?X=MATH&H=16#text16
.. [2] http://www.easyrgb.com/index.php?X=MATH&H=02#text2
.. [3] https://en.wikipedia.org/wiki/CIELUV
"""
return xyz2luv(rgb2xyz(rgb))
def luv2rgb(luv):
"""Luv to RGB color space conversion.
Parameters
----------
luv : (M, N, [P,] 3) array_like
The 3 or 4 dimensional image in CIE Luv format. Final dimension denotes
channels.
Returns
-------
out : (M, N, [P,] 3) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not a 3-D or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This function uses luv2xyz and xyz2rgb.
"""
return xyz2rgb(luv2xyz(luv))
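# Usage sketch (added for illustration, not part of the original module):
# rgb2luv and luv2rgb are inverses up to floating-point error, just like the
# Lab pair above.
def _example_rgb_luv_roundtrip(rgb):
    return luv2rgb(rgb2luv(rgb))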
def rgb2hed(rgb):
"""RGB to Haematoxylin-Eosin-DAB (HED) color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Returns
-------
out : ndarray
The image in HED format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
"""
return separate_stains(rgb, hed_from_rgb)
def hed2rgb(hed):
"""Haematoxylin-Eosin-DAB (HED) to RGB color space conversion.
Parameters
----------
hed : array_like
The image in the HED color space, in a 3-D array of shape
``(.., .., 3)``.
Returns
-------
out : ndarray
The image in RGB, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `hed` is not a 3-D array of shape ``(.., .., 3)``.
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2hed, hed2rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hed = rgb2hed(ihc)
>>> ihc_rgb = hed2rgb(ihc_hed)
"""
return combine_stains(hed, rgb_from_hed)
def separate_stains(rgb, conv_matrix):
"""RGB to stain color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in stain color space, in a 3-D array of shape
``(.., .., 3)``.
Raises
------
ValueError
If `rgb` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain separation matrices available in the ``color`` module and their
respective colorspace:
* ``hed_from_rgb``: Hematoxylin + Eosin + DAB
* ``hdx_from_rgb``: Hematoxylin + DAB
* ``fgx_from_rgb``: Feulgen + Light Green
* ``bex_from_rgb``: Giemsa stain : Methyl Blue + Eosin
* ``rbd_from_rgb``: FastRed + FastBlue + DAB
* ``gdx_from_rgb``: Methyl Green + DAB
* ``hax_from_rgb``: Hematoxylin + AEC
    * ``bro_from_rgb``: Blue matrix Aniline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``bpx_from_rgb``: Methyl Blue + Ponceau Fuchsin
* ``ahx_from_rgb``: Alcian Blue + Hematoxylin
* ``hpx_from_rgb``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import separate_stains, hdx_from_rgb
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
"""
rgb = dtype.img_as_float(rgb, force_copy=True)
rgb += 2
stains = np.reshape(-np.log10(rgb), (-1, 3)) @ conv_matrix
return np.reshape(stains, rgb.shape)
def combine_stains(stains, conv_matrix):
"""Stain to RGB color space conversion.
Parameters
----------
stains : array_like
The image in stain color space, in a 3-D array of shape
``(.., .., 3)``.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
Returns
-------
out : ndarray
The image in RGB format, in a 3-D array of shape ``(.., .., 3)``.
Raises
------
ValueError
If `stains` is not a 3-D array of shape ``(.., .., 3)``.
Notes
-----
Stain combination matrices available in the ``color`` module and their
respective colorspace:
* ``rgb_from_hed``: Hematoxylin + Eosin + DAB
* ``rgb_from_hdx``: Hematoxylin + DAB
* ``rgb_from_fgx``: Feulgen + Light Green
* ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin
* ``rgb_from_rbd``: FastRed + FastBlue + DAB
* ``rgb_from_gdx``: Methyl Green + DAB
* ``rgb_from_hax``: Hematoxylin + AEC
    * ``rgb_from_bro``: Blue matrix Aniline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin
* ``rgb_from_ahx``: Alcian Blue + Hematoxylin
* ``rgb_from_hpx``: Hematoxylin + PAS
References
----------
.. [1] http://www.dentistry.bham.ac.uk/landinig/software/cdeconv/cdeconv.html
Examples
--------
>>> from skimage import data
>>> from skimage.color import (separate_stains, combine_stains,
... hdx_from_rgb, rgb_from_hdx)
>>> ihc = data.immunohistochemistry()
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
>>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)
"""
from ..exposure import rescale_intensity
stains = dtype.img_as_float(stains)
logrgb2 = -np.reshape(stains, (-1, 3)) @ conv_matrix
rgb2 = np.power(10, logrgb2)
return rescale_intensity(np.reshape(rgb2 - 2, stains.shape),
in_range=(-1, 1))
def lab2lch(lab):
"""CIE-LAB to CIE-LCH color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lab : array_like
The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LCH format, in a N-D array with same shape as input `lab`.
Raises
------
ValueError
        If `lab` does not have at least 3 color channels (i.e. l, a, b).
Notes
-----
The Hue is expressed as an angle between ``(0, 2*pi)``
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lab2lch
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
"""
lch = _prepare_lab_array(lab)
a, b = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = _cart2polar_2pi(a, b)
return lch
def _cart2polar_2pi(x, y):
"""convert cartesian coordinates to polar (uses non-standard theta range!)
NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than usual ``(-pi, +pi)``
"""
r, t = np.hypot(x, y), np.arctan2(y, x)
t += np.where(t < 0., 2 * np.pi, 0)
return r, t
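# Small sketch (added for illustration, not part of the original module) of the
# non-standard angle range documented above: a point below the x-axis maps into
# (pi, 2*pi) instead of the negative angles np.arctan2 would return.
def _example_cart2polar_range():
    r, t = _cart2polar_2pi(np.array([0.0]), np.array([-1.0]))
    # r is 1.0 and t is 3*pi/2 rather than -pi/2
    return r, t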
def lch2lab(lch):
"""CIE-LCH to CIE-LAB color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lch : array_like
The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
Returns
-------
out : ndarray
The image in LAB format, with same shape as input `lch`.
Raises
------
ValueError
If `lch` does not have at least 3 color channels (i.e. l, c, h).
Examples
--------
>>> from skimage import data
>>> from skimage.color import rgb2lab, lch2lab
>>> img = data.astronaut()
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
>>> img_lab2 = lch2lab(img_lch)
"""
lch = _prepare_lab_array(lch)
c, h = lch[..., 1], lch[..., 2]
lch[..., 1], lch[..., 2] = c * np.cos(h), c * np.sin(h)
return lch
def _prepare_lab_array(arr):
"""Ensure input for lab2lch, lch2lab are well-posed.
Arrays must be in floating point and have at least 3 elements in
last dimension. Return a new array.
"""
arr = np.asarray(arr)
shape = arr.shape
if shape[-1] < 3:
raise ValueError('Input array has less than 3 color channels')
return dtype.img_as_float(arr, force_copy=True)
def rgb2yuv(rgb):
"""RGB to YUV color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YUV format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
Y is between 0 and 1. Use YCbCr instead of YUV for the color space which
is commonly used by video codecs (where Y ranges from 16 to 235)
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(yuv_from_rgb, rgb)
def rgb2yiq(rgb):
"""RGB to YIQ color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YIQ format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
"""
return _convert(yiq_from_rgb, rgb)
def rgb2ypbpr(rgb):
"""RGB to YPbPr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YPbPr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(ypbpr_from_rgb, rgb)
def rgb2ycbcr(rgb):
"""RGB to YCbCr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YCbCr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
Y is between 16 and 235. This is the color space which is commonly used
    by video codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
arr = _convert(ycbcr_from_rgb, rgb)
arr[..., 0] += 16
arr[..., 1] += 128
arr[..., 2] += 128
return arr
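# Round-trip sketch (added for illustration, not part of the original module):
# for a float RGB image in [0, 1], rgb2ycbcr followed by ycbcr2rgb (defined
# further below) should approximately recover the input, with Y in [16, 235].
def _example_ycbcr_roundtrip(rgb):
    return ycbcr2rgb(rgb2ycbcr(rgb))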
def rgb2ydbdr(rgb):
"""RGB to YDbDr color space conversion.
Parameters
----------
rgb : array_like
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in YDbDr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `rgb` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This is the color space which is commonly used
    by video codecs; it is also the reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
arr = _convert(ydbdr_from_rgb, rgb)
return arr
def yuv2rgb(yuv):
"""YUV to RGB color space conversion.
Parameters
----------
yuv : array_like
The image in YUV format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `yuv` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(rgb_from_yuv, yuv)
def yiq2rgb(yiq):
"""YIQ to RGB color space conversion.
Parameters
----------
yiq : array_like
The image in YIQ format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `yiq` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
"""
return _convert(rgb_from_yiq, yiq)
def ypbpr2rgb(ypbpr):
"""YPbPr to RGB color space conversion.
Parameters
----------
ypbpr : array_like
The image in YPbPr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ypbpr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(rgb_from_ypbpr, ypbpr)
def ycbcr2rgb(ycbcr):
"""YCbCr to RGB color space conversion.
Parameters
----------
ycbcr : array_like
The image in YCbCr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ycbcr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
Y is between 16 and 235. This is the color space which is commonly used
    by video codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
arr = ycbcr.copy()
arr[..., 0] -= 16
arr[..., 1] -= 128
arr[..., 2] -= 128
return _convert(rgb_from_ycbcr, arr)
def ydbdr2rgb(ydbdr):
"""YDbDr to RGB color space conversion.
Parameters
----------
ydbdr : array_like
The image in YDbDr format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Returns
-------
out : ndarray
The image in RGB format, in a 3- or 4-D array of shape
``(M, N, [P,] 3)``.
Raises
------
ValueError
If `ydbdr` is not a 3- or 4-D array of shape ``(M, N, [P,] 3)``.
Notes
-----
This is the color space which is commonly used
    by video codecs; it is also the reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
arr = ydbdr.copy()
return _convert(rgb_from_ydbdr, arr)
|
py | 1a493b5219ef9e996ca4f917b97f475af793d06b |
# Code from Chapter 7 of Machine Learning: An Algorithmic Perspective
# by Stephen Marsland (http://seat.massey.ac.nz/personal/s.r.marsland/MLBook.html)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# Stephen Marsland, 2008
from numpy import *
import dtree
class bagger:
"""The bagging algorithm based on the decision tree of Chapter 6"""
def __init__(self):
""" Constructor """
self.tree = dtree.dtree()
def bag(self,data,targets,features,nSamples):
nPoints = shape(data)[0]
nDim = shape(data)[1]
self.nSamples = nSamples
        # Compute bootstrap samples
samplePoints = random.randint(0,nPoints,(nPoints,nSamples))
classifiers = []
for i in range(nSamples):
sample = []
sampleTarget = []
for j in range(nPoints):
sample.append(data[samplePoints[j,i]])
sampleTarget.append(targets[samplePoints[j,i]])
# Train classifiers
classifiers.append(self.tree.make_tree(sample,sampleTarget,features,1))
return classifiers
def bagclass(self,classifiers,data):
decision = []
# Majority voting
for j in range(len(data)):
outputs = []
#print data[j]
for i in range(self.nSamples):
out = self.tree.classify(classifiers[i],data[j])
if out is not None:
outputs.append(out)
# List the possible outputs
out = []
for each in outputs:
if out.count(each)==0:
out.append(each)
frequency = zeros(len(out))
index = 0
if len(out)>0:
for each in out:
frequency[index] = outputs.count(each)
index += 1
decision.append(out[frequency.argmax()])
else:
decision.append(None)
return decision
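# Hedged usage sketch (added for illustration, not part of the original code):
# `data`, `targets` and `features` are assumed to be in the format expected by
# dtree.make_tree from Chapter 6; nSamples controls how many bootstrap trees
# take part in the majority vote.
def example_bagging(data, targets, features, nSamples=10):
    b = bagger()
    classifiers = b.bag(data, targets, features, nSamples)
    return b.bagclass(classifiers, data)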
|
py | 1a493b738bbcae9ea9e4ae7b8903108d0f3d918b | import typing
import inspect
import functools
from base64 import b64decode
from types import FunctionType
import httpx
from rpcpy.serializers import BaseSerializer, JSONSerializer
from rpcpy.utils.openapi import set_type_model
__all__ = ["Client"]
Function = typing.TypeVar("Function", bound=FunctionType)
class ClientMeta(type):
def __call__(cls, *args: typing.Any, **kwargs: typing.Any) -> typing.Any:
if cls.__name__ == "Client":
if isinstance(args[0], httpx.Client):
return SyncClient(*args, **kwargs)
if isinstance(args[0], httpx.AsyncClient):
return AsyncClient(*args, **kwargs)
raise TypeError(
"The parameter `client` must be an httpx.Client or httpx.AsyncClient object."
)
return super().__call__(*args, **kwargs)
class Client(metaclass=ClientMeta):
def __init__(
self,
client: typing.Union[httpx.Client, httpx.AsyncClient],
*,
base_url: str,
request_serializer: BaseSerializer = JSONSerializer(),
response_serializer: BaseSerializer = JSONSerializer(),
) -> None:
assert base_url.endswith("/"), "base_url must be end with '/'"
self.base_url = base_url
self.client = client
self.request_serializer = request_serializer
self.response_serializer = response_serializer
def remote_call(self, func: Function) -> Function:
set_type_model(func) # try set `__body_model__`
return func
def _get_url(self, func: Function) -> str:
return self.base_url + func.__name__
def _get_content(
self, func: typing.Callable, *args: typing.Any, **kwargs: typing.Any
) -> bytes:
sig = inspect.signature(func)
bound_values = sig.bind(*args, **kwargs)
if hasattr(func, "__body_model__"):
_params = getattr(func, "__body_model__")(**bound_values.arguments).dict()
else:
_params = dict(**bound_values.arguments)
return self.request_serializer.encode(_params)
class AsyncClient(Client):
if typing.TYPE_CHECKING:
client: httpx.AsyncClient
def remote_call(self, func: Function) -> Function:
if not (inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func)):
raise TypeError(
"Asynchronous Client can only register asynchronous functions."
)
func = super().remote_call(func)
url = self._get_url(func)
if not inspect.isasyncgenfunction(func):
@functools.wraps(func)
async def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
resp = await self.client.post(
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
)
resp.raise_for_status()
return self.response_serializer.decode(resp.content)
else:
@functools.wraps(func)
async def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
async with self.client.stream(
"POST",
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
) as resp:
resp.raise_for_status()
async for line in resp.aiter_lines():
if line.startswith("data:"):
data = line.split(":", maxsplit=1)[1]
yield self.response_serializer.decode(
b64decode(data.encode("ascii"))
)
return typing.cast(Function, wrapper)
class SyncClient(Client):
if typing.TYPE_CHECKING:
client: httpx.Client
def remote_call(self, func: Function) -> Function:
if inspect.iscoroutinefunction(func) or inspect.isasyncgenfunction(func):
raise TypeError(
"Synchronization Client can only register synchronization functions."
)
func = super().remote_call(func)
url = self._get_url(func)
if not inspect.isgeneratorfunction(func):
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
resp = self.client.post(
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
)
resp.raise_for_status()
return self.response_serializer.decode(resp.content)
else:
@functools.wraps(func)
def wrapper(*args: typing.Any, **kwargs: typing.Any) -> typing.Any:
post_content = self._get_content(func, *args, **kwargs)
with self.client.stream(
"POST",
url,
content=post_content,
headers={
"content-type": self.request_serializer.content_type,
"serializer": self.request_serializer.name,
},
) as resp:
resp.raise_for_status()
for line in resp.iter_lines():
if line.startswith("data:"):
data = line.split(":", maxsplit=1)[1]
yield self.response_serializer.decode(
b64decode(data.encode("ascii"))
)
return typing.cast(Function, wrapper)
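# Hedged usage sketch (added for illustration, not part of the original
# module): the URL "http://127.0.0.1:8000/" and the function name `sayhi`
# are placeholders; calling the decorated function issues a POST against a
# running rpcpy server at base_url + the function name.
def _example_sync_client():
    client = Client(httpx.Client(), base_url="http://127.0.0.1:8000/")
    @client.remote_call
    def sayhi(name: str) -> str:
        ...  # the body is never executed locally; the server's result is returned
    return sayhi("rpcpy")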
|
py | 1a493bc58a2274d61bc6aad6437f8b3cdca45850 | """
Expressions
-----------
Offer fast expression evaluation through numexpr
"""
import warnings
import numpy as np
from pandas._config import get_option
from pandas._libs.lib import values_from_object
from pandas.core.dtypes.generic import ABCDataFrame
from pandas.core.computation.check import _NUMEXPR_INSTALLED
if _NUMEXPR_INSTALLED:
import numexpr as ne
_TEST_MODE = None
_TEST_RESULT = None
_USE_NUMEXPR = _NUMEXPR_INSTALLED
_evaluate = None
_where = None
# the set of dtypes that we will allow pass to numexpr
_ALLOWED_DTYPES = {
"evaluate": {"int64", "int32", "float64", "float32", "bool"},
"where": {"int64", "float64", "bool"},
}
# the minimum prod shape that we will use numexpr
_MIN_ELEMENTS = 10000
def set_use_numexpr(v=True):
# set/unset to use numexpr
global _USE_NUMEXPR
if _NUMEXPR_INSTALLED:
_USE_NUMEXPR = v
# choose what we are going to do
global _evaluate, _where
if not _USE_NUMEXPR:
_evaluate = _evaluate_standard
_where = _where_standard
else:
_evaluate = _evaluate_numexpr
_where = _where_numexpr
def set_numexpr_threads(n=None):
# if we are using numexpr, set the threads to n
# otherwise reset
if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
if n is None:
n = ne.detect_number_of_cores()
ne.set_num_threads(n)
def _evaluate_standard(op, op_str, a, b):
""" standard evaluation """
if _TEST_MODE:
_store_test_result(False)
with np.errstate(all="ignore"):
return op(a, b)
def _can_use_numexpr(op, op_str, a, b, dtype_check):
""" return a boolean if we WILL be using numexpr """
if op_str is not None:
# required min elements (otherwise we are adding overhead)
if np.prod(a.shape) > _MIN_ELEMENTS:
# check for dtype compatibility
dtypes = set()
for o in [a, b]:
# Series implements dtypes, check for dimension count as well
if hasattr(o, "dtypes") and o.ndim > 1:
s = o.dtypes.value_counts()
if len(s) > 1:
return False
dtypes |= set(s.index.astype(str))
# ndarray and Series Case
elif hasattr(o, "dtype"):
dtypes |= {o.dtype.name}
# allowed are a superset
if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
return True
return False
def _evaluate_numexpr(op, op_str, a, b):
result = None
if _can_use_numexpr(op, op_str, a, b, "evaluate"):
is_reversed = op.__name__.strip("_").startswith("r")
if is_reversed:
# we were originally called by a reversed op method
a, b = b, a
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate(
"a_value {op} b_value".format(op=op_str),
local_dict={"a_value": a_value, "b_value": b_value},
casting="safe",
)
if _TEST_MODE:
_store_test_result(result is not None)
if result is None:
result = _evaluate_standard(op, op_str, a, b)
return result
def _where_standard(cond, a, b):
return np.where(
values_from_object(cond), values_from_object(a), values_from_object(b)
)
def _where_numexpr(cond, a, b):
result = None
if _can_use_numexpr(None, "where", a, b, "where"):
cond_value = getattr(cond, "values", cond)
a_value = getattr(a, "values", a)
b_value = getattr(b, "values", b)
result = ne.evaluate(
"where(cond_value, a_value, b_value)",
local_dict={
"cond_value": cond_value,
"a_value": a_value,
"b_value": b_value,
},
casting="safe",
)
if result is None:
result = _where_standard(cond, a, b)
return result
# turn myself on
set_use_numexpr(get_option("compute.use_numexpr"))
def _has_bool_dtype(x):
if isinstance(x, ABCDataFrame):
return "bool" in x.dtypes
try:
return x.dtype == bool
except AttributeError:
return isinstance(x, (bool, np.bool_))
def _bool_arith_check(
op_str, a, b, not_allowed=frozenset(("/", "//", "**")), unsupported=None
):
if unsupported is None:
unsupported = {"+": "|", "*": "&", "-": "^"}
if _has_bool_dtype(a) and _has_bool_dtype(b):
if op_str in unsupported:
warnings.warn(
f"evaluating in Python space because the {repr(op_str)} "
f"operator is not supported by numexpr for "
f"the bool dtype, use {repr(unsupported[op_str])} instead"
)
return False
if op_str in not_allowed:
raise NotImplementedError(
f"operator {repr(op_str)} not implemented for bool dtypes"
)
return True
def evaluate(op, op_str, a, b, use_numexpr=True):
"""
Evaluate and return the expression of the op on a and b.
Parameters
----------
op : the actual operand
op_str : str
The string version of the op.
a : left operand
b : right operand
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
use_numexpr = use_numexpr and _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b)
return _evaluate_standard(op, op_str, a, b)
def where(cond, a, b, use_numexpr=True):
"""
Evaluate the where condition cond on a and b.
Parameters
----------
cond : np.ndarray[bool]
a : return if cond is True
b : return if cond is False
use_numexpr : bool, default True
Whether to try to use numexpr.
"""
if use_numexpr:
return _where(cond, a, b)
return _where_standard(cond, a, b)
def set_test_mode(v=True):
"""
Keeps track of whether numexpr was used. Stores an additional ``True``
for every successful use of evaluate with numexpr since the last
``get_test_result``
"""
global _TEST_MODE, _TEST_RESULT
_TEST_MODE = v
_TEST_RESULT = []
def _store_test_result(used_numexpr):
global _TEST_RESULT
if used_numexpr:
_TEST_RESULT.append(used_numexpr)
def get_test_result():
"""get test result and reset test_results"""
global _TEST_RESULT
res = _TEST_RESULT
_TEST_RESULT = []
return res
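# Hedged usage sketch (added for illustration, not part of the original
# module): operator.add and two small float arrays are assumptions. Arrays
# smaller than _MIN_ELEMENTS fall back to the standard path, so the result
# equals op(a, b) either way.
def _example_evaluate_add():
    import operator
    a = np.arange(10.0)
    b = np.arange(10.0)
    return evaluate(operator.add, "+", a, b)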
|
py | 1a493c3d2733815f93700e05f2d2493ffa17dbad | from __future__ import with_statement
from time import time
from fabric.api import cd, run, env, roles
from fabric.decorators import task
from fabric.contrib.files import exists
env.use_ssh_config = True
releases_dir = "/home/deploy/issadmin/releases"
git_branch = "master"
git_repo = "https://github.com/wgerez/iss-dashboard.git"
repo_dir = "/home/deploy/issadmin/repo"
persist_dir = "/home/deploy/issadmin/persist"
next_release = "%(time).0f" % {'time': time()}
current_release = "/home/deploy/issadmin/current"
env.roledefs = {
'test': ['issqa'],
'production': ['iss']
}
@task
def deploy(migrate='no'):
init()
update_git()
create_release()
build_site()
if migrate=='yes':
migrate_from = "%s/%s" % (releases_dir, next_release)
migrate_forward(migrate_from)
swap_symlinks()
@task
def migrate():
migrate_forward()
@task
def migrate_back():
migrate_backward()
def migrate_forward(release_dir=None, env='production'):
if not release_dir:
release_dir=current_release
with cd(release_dir):
run('php artisan migrate --env=%s' % env)
def migrate_backward(release_dir=None, env='production'):
if not release_dir:
release_dir = current_release
with cd(release_dir):
run('php artisan migrate:rollback --env=%s' % env)
def init():
if not exists(releases_dir):
run("mkdir -p %s" % releases_dir)
if not exists(repo_dir):
run("git clone -b %s %s %s" % (git_branch, git_repo, repo_dir) )
if not exists("%s/storage" % persist_dir):
run("mkdir -p %s/storage/cache" % persist_dir)
run("mkdir -p %s/storage/fonts" % persist_dir)
run("mkdir -p %s/storage/logs" % persist_dir)
run("mkdir -p %s/storage/meta" % persist_dir)
run("mkdir -p %s/storage/sessions" % persist_dir)
run("mkdir -p %s/storage/views" % persist_dir)
def update_git():
with cd(repo_dir):
run("git checkout %s" % git_branch)
run("git pull origin %s" % git_branch)
def create_release():
release_into = "%s/%s" % (releases_dir, next_release)
run("mkdir -p %s" % release_into)
with cd(repo_dir):
run("git archive --worktree-attributes %s | tar -x -C %s" % (git_branch, release_into))
def build_site():
with cd("%s/%s" % (releases_dir, next_release)):
run("rm composer.lock")
run("composer install")
def swap_symlinks():
release_into = "%s/%s" % (releases_dir, next_release)
run("ln -nfs %s/database.php %s/app/config/database.php" % (persist_dir, release_into))
run("rm -rf %s/app/storage" % release_into)
run("rm -rf %s/public/alumnos" % release_into)
run("rm -rf %s/public/docentes" % release_into)
run("ln -nfs %s/storage %s/app/storage" % (persist_dir, release_into))
run("ln -nfs %s/alumnos %s/public/alumnos" % (persist_dir, release_into))
run("ln -nfs %s/docentes %s/public/docentes" % (persist_dir, release_into))
run("ln -nfs %s %s" % (release_into, current_release))
run("sudo service php7.0-fpm reload")
|
py | 1a493c4150d01339670a405515bdc3b49b329d7b | '''
This is the sample code from the homework. You shold NOT modify this file.
Instead, please copy this file to src/students/<your student ID>.py and
edit it there.
'''
import os
# Define global variables with upper case
SRC_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TEST_DATA_DIR = os.path.join(SRC_PATH, 'test_data')
def task_1(dummy=None):
'''
Task 1: Basic Syntax and Flake8 Checker
Python uses indentations to separate blocks instead of backets.
Unlike most programming language (like C++), indentations in Python
are required.
See https://www.python-course.eu/python3_blocks.php for some examples.
Flake8 (http://flake8.pycqa.org/en/latest/) could help you check these
syntax error. It also regular your coding style. For example, using
two whitespaces as indentation is allowed in Python. However, Flake8
will tell you it is an error "E111: indentation is not a multiple of four".
This is because when many people work on the same project, it would be
confusing if people are using different identation style.
Following the coding style in Flake8 is strongly suggested.
'''
# Hint:
# Run `python autograder.py -task 1 -student_id <your student ID>`
# under src/ to see if you pass this task.
# The correct output would be "Hello world" without any
# error. Note that passing this task does NOT mean you pass the
# Flake8 chcker. Please check your style with
# `flake8 src/student/<your student ID>.py`
# TODO: fix the syntax error for the following code
if True:
sentence = "Hello world"
print(sentence)
# End of TODO (do not change the code below)
return True
def task_2(
input_list: list = [1, 4, 53, 27, 9],
target_index: int = 0,
input_dictionary: dict = {"a": " taiwan", "b": 20, "c": "CSIE"},
target_key: str = "a"
) -> tuple:
'''
Task 2: Data Types
Python has many data types, including Boolean, String, Integer, Float,
List, Dictionary, etc.
You could use the function type() to see the data type:
>>> type(5)
<class 'int'>
>>> type("hi")
<class 'str'>
>>> type(9.2)
<class 'float'>
>>> type(["list", "could", "include", "different", "data type", 5, 3.2])
<class 'list'>
>>> type(("you could not change elements", "in a tuple"))
<class 'tuple'>
>>> type({"a": 1, "b":20})
<class 'dict'>
>>> type(True)
<class 'bool'>
>>>
Try to play with the Python IDE to see different data types by yourself.
In this task, you are asked to use these datatype.
Args:
input_list: a list with several items
target_index: target index for the input_list. You need to get the
list element with this index (i.e, 'input_list[target_index]')
input_dictionary: a dictionary with several key-value pairs.
target_key: target key for the input_dictionary You need to get the
value with this key (i.e., input_dictionary[target_key])
Returns:
input_list_length_and_sentence: a tuple that contains two elements.
The first one is an integer that indicates the length of input_list
The second one is a string that contains the combination of
input_list[target_index] and input_dictionary[target_key]
Examples:
Inputs:
input_list = [1, 3, 5, 7, 9]
target_index = 0
input_dictionary = {"1": "8", "f": "abc", "s": 5.5, "5.5" 900}
target_key = "5.5"
Returns:
input_list_length_and_sentence = (5, "1900")
Hints:
* Try to use print() to print out the inputs.
* Use len() to get the length of the list.
* Different data types could not be added. Use str() to convert data
to string.
* Run `python src/autograder.py -task 2 -student_id <your student ID>`
to see if you pass this task.
* The correct output would be (4, '1 taiwan')
'''
# TODO: change length and sentence to fit the requirement
length = len(input_list)
sentence = str(input_list[target_index])
sentence += str(input_dictionary[str(target_key)])
# End of TODO
input_list_length_and_sentence = (length, sentence)
print(input_list_length_and_sentence)
return input_list_length_and_sentence
def task_3(
number: int = 1314151677777
) -> list:
'''
Task 3: Conditions
Args:
number: a integer input
Returns:
prime_factors_below_10: a list of the number's largest factors
below 10
if the number is negative, return [-1]
if the number is zero, return [0]
Hints:
* Use % to get the remainder
* Using a loop (introduced in the next task) will make some
conditions simpler
'''
prime_factors_below_10 = []
# TODO: fill in the conditions
if number < 0:
prime_factors_below_10 = [-1]
# elif stands for "else if" in Python.
elif number == 0:
prime_factors_below_10 = [0]
else:
if number % 2 == 0:
prime_factors_below_10.append(2)
if number % 3 == 0:
prime_factors_below_10.append(3)
if number % 5 == 0:
prime_factors_below_10.append(5)
if number % 7 == 0:
prime_factors_below_10.append(7)
# End of TODO
print(prime_factors_below_10)
return prime_factors_below_10
def task_4(
numbers: list = [2, 4, 5, 6, 9]
) -> list:
'''
Task 4: For and While Loop
Args:
numbers: a list of integers
Returns:
list_of_stars: a list of stars (*)
For each number n in the list, you need to
append n lines of stars to the list, where
the first line has one star, the last line
has n stars.
Examples:
input:
[1, 3, 5]
output:
['*',
'*',
'**',
'***',
'*',
'**',
'***',
'****',
'*****']
Hints:
* You could create a string with repetitive substring by <str> * <int>
'''
list_of_stars = []
# In Python, the for loop could iterate through a list directly
for number in numbers:
# TODO: change stars to correct length
for i in range(1, number+1):
stars = "*" * i
list_of_stars.append(stars)
# End of TODO
# This could be done by the while loop
list_of_stars_while = []
i = 0
while i < len(numbers):
# TODO: change stars to correct length
j = 1
while j <= numbers[i]:
stars = "*" * j
j += 1 # This line is equivalant to j = j + 1
list_of_stars_while.append(stars)
i += 1
# End of TODO
print("=====> Output list_of_stars")
for stars in list_of_stars:
print(stars)
print("=====> Output list_of_stars_while")
for stars in list_of_stars_while:
print(stars)
for ans1, ans2 in zip(list_of_stars, list_of_stars_while):
assert ans1 == ans2
return list_of_stars
def task_5(
input_filename: str = 'task_5_input.txt',
output_filename: str = 'task_5_output.txt'
) -> str:
'''
Task 5: I/O with files
Args:
input_filename: input filename
output_filename: output filename
Returns:
lines: content in the output file without commas
Hints:
        * Use <str>.split(something) to split a string into several substrings
* Use fout.write(something) to write text into the output file
'''
input_filename = os.path.join(TEST_DATA_DIR, input_filename)
output_filename = os.path.join(TEST_DATA_DIR, output_filename)
# Remove previous output file
if os.path.exists(output_filename):
os.remove(output_filename)
with open(input_filename, 'r') as fin, open(output_filename, 'w') as fout:
lines = fin.readlines()
print(f"=======> Input file content:")
for line in lines:
print(f"{line}")
        # TODO: read the content of the input file, where words are separated by
# commas. Please remove the commas and write words to the output file
line = line.replace(",", "")
fout.write(line)
pass
# End of TODO
with open(output_filename, 'r') as fin:
lines = fin.readlines()
print(f"=======> Output file content:")
print(lines)
return "".join(lines)
def task_6(
matrix: list = [[-0.5, 1], [1, 0.5], [-1, 0.5], [-1, -0.5]],
vector: list = [1, 0.5]
) -> list:
'''
Task 6: Functions
Args:
matrix: a list of v1
vector: v2
Returns:
cos_sims: a list of cosine similarity between v1s and v2
Hints:
        * A good function name should be self-explanatory
* A good function should be less than 30 lines
* A good function should include comments to explain how to use it
* Cosine similarity of the vector itself will be 0.9999999 instead of 1
'''
# You could define function B in function A, but function B could only
# be used in the scope of function A
def dot_product(v1, v2):
assert len(v1) == len(v2)
return sum(a*b for a, b in zip(v1, v2))
def norm(vector):
# Note that this function would have some minor error due to the
# approximation of square root
return dot_product(vector, vector) ** 0.5
    def get_cosine_similarity(v1, v2):
'''
Calculate the cosine similarity = v1 * v2 / (|v1| * |v2|)
'''
# TODO: use the above functions to calculate cosine similarity of
# the two vectors v1 and v2
cos_sim = dot_product(v1, v2) / (norm(v1) * norm(v2))
# End of TODO
return cos_sim
cos_sims = []
for v1 in matrix:
        cos_sim = get_cosine_similarity(v1, vector)
print(f"Cosine similarity between {v1} and {vector}: {cos_sim}")
cos_sims.append(cos_sim)
return cos_sims
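# Worked check (added for illustration, not part of the original handout):
# orthogonal vectors give a similarity of 0 and identical vectors give 1,
# up to the square-root approximation noted in the helper above.
def _example_task_6():
    return task_6(matrix=[[1, 0], [0, 1]], vector=[1, 0])  # ~[1.0, 0.0]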
class Student():
def __init__(self, student_id, time):
self.student_id = student_id
self.time = time
self.words_to_say = "initial value"
def set_words_to_say(self, words_to_say):
self.words_to_say = words_to_say
def hello(self):
return (
f"Hello, {self.student_id}! Time is {self.time}. "
f"I want to say {self.words_to_say}"
)
def task_7(
student_id: str = 'test_id',
time: str = '2018_11_24_0000'
) -> Student:
'''
Task 7: Class
Args:
student_id: someone's student ID
time: a certain time
Returns:
student: an Student object
Hints:
* Use Student(parameters1, parameters2 ...) to create an object
and assign it to a variable
* Use <created object>.<object function> to call object function
'''
# TODO: create a student object with different words to say
student = Student(student_id, time)
student.set_words_to_say("Hi")
# End of TODO
print(student.hello())
return student
def task_8(
img_url: str = 'https://i.imgur.com/B75zq0x.jpg'
) -> object:
'''
Task 8: Module
Args:
img_url: address of an image
Returns:
result_img: an PIL Image
Hints:
* Make sure you have installed the PIL package
* Take a look at utils.py first
* You could easily find answers with Google
'''
from urllib import request
result_img = None
# TODO: download the image from img_url with the request module
# and add your student ID on it with draw_text() in the utils module
# under src/.
from PIL import Image
import utils
response = request.urlopen(img_url)
result_img = Image.open(response)
result_img = utils.draw_text(result_img, "b07902005")
# You are allowed to change the img_url to your own image URL.
# Display the image:
# result_img.show()
# Note: please comment this line when hand in.
# If you are running on a server, use
# result.save('test.jpg')
# and copy the file to local or use Jupyter Notebook to render.
# End of TODO
return result_img
|
py | 1a493c53e2189ffd57b13c63afad680b95cf872f | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle.fluid as fluid
import parl
from parl import layers
from parl.utils import machine_info
class AtariAgent(parl.Agent):
def __init__(self, algorithm, obs_shape, act_dim,
learn_data_provider=None):
assert isinstance(obs_shape, (list, tuple))
assert isinstance(act_dim, int)
self.obs_shape = obs_shape
self.act_dim = act_dim
super(AtariAgent, self).__init__(algorithm)
if learn_data_provider:
self.learn_reader.decorate_tensor_provider(learn_data_provider)
self.learn_reader.start()
def build_program(self):
self.sample_program = fluid.Program()
self.predict_program = fluid.Program()
self.learn_program = fluid.Program()
with fluid.program_guard(self.sample_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
self.sample_actions, self.behaviour_logits = self.alg.sample(obs)
with fluid.program_guard(self.predict_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
self.predict_actions = self.alg.predict(obs)
with fluid.program_guard(self.learn_program):
obs = layers.data(
name='obs', shape=self.obs_shape, dtype='float32')
actions = layers.data(name='actions', shape=[], dtype='int64')
behaviour_logits = layers.data(
name='behaviour_logits', shape=[self.act_dim], dtype='float32')
rewards = layers.data(name='rewards', shape=[], dtype='float32')
dones = layers.data(name='dones', shape=[], dtype='float32')
lr = layers.data(
name='lr', shape=[1], dtype='float32', append_batch_size=False)
entropy_coeff = layers.data(
name='entropy_coeff', shape=[], dtype='float32')
self.learn_reader = fluid.layers.create_py_reader_by_data(
capacity=32,
feed_list=[
obs, actions, behaviour_logits, rewards, dones, lr,
entropy_coeff
])
obs, actions, behaviour_logits, rewards, dones, lr, entropy_coeff = fluid.layers.read_file(
self.learn_reader)
vtrace_loss, kl = self.alg.learn(obs, actions, behaviour_logits,
rewards, dones, lr, entropy_coeff)
self.learn_outputs = [
vtrace_loss.total_loss, vtrace_loss.pi_loss,
vtrace_loss.vf_loss, vtrace_loss.entropy, kl
]
        self.learn_program = parl.compile(self.learn_program, vtrace_loss.total_loss)
def sample(self, obs_np):
"""
Args:
obs_np: a numpy float32 array of shape ([B] + observation_space).
Format of image input should be NCHW format.
Returns:
sample_ids: a numpy int64 array of shape [B]
"""
obs_np = obs_np.astype('float32')
sample_actions, behaviour_logits = self.fluid_executor.run(
self.sample_program,
feed={'obs': obs_np},
fetch_list=[self.sample_actions, self.behaviour_logits])
return sample_actions, behaviour_logits
def predict(self, obs_np):
"""
Args:
obs_np: a numpy float32 array of shape ([B] + observation_space)
Format of image input should be NCHW format.
Returns:
sample_ids: a numpy int64 array of shape [B]
"""
obs_np = obs_np.astype('float32')
predict_actions = self.fluid_executor.run(
self.predict_program,
feed={'obs': obs_np},
fetch_list=[self.predict_actions])[0]
return predict_actions
def learn(self):
total_loss, pi_loss, vf_loss, entropy, kl = self.fluid_executor.run(
self.learn_program, fetch_list=self.learn_outputs)
return total_loss, pi_loss, vf_loss, entropy, kl
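# Hedged usage sketch (added for illustration, not part of the original file):
# `agent` is an already-constructed AtariAgent and `batch_of_obs` a float32
# array of shape [B] + obs_shape (NCHW for images), as the docstrings above
# describe.
def _example_sample_call(agent, batch_of_obs):
    actions, behaviour_logits = agent.sample(batch_of_obs)
    return actions, behaviour_logits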
|
py | 1a493d842914d100ed1e00863df1920556d90e35 | """Utilities for calculating and reporting statistics about types."""
import cgi
import os.path
import re
from typing import Any, Dict, List, cast, Tuple
from mypy.traverser import TraverserVisitor
from mypy.types import (
Type, AnyType, Instance, FunctionLike, TupleType, Void, TypeVarType,
TypeQuery, ANY_TYPE_STRATEGY, CallableType
)
from mypy import nodes
from mypy.nodes import (
Node, FuncDef, TypeApplication, AssignmentStmt, NameExpr, CallExpr,
MemberExpr, OpExpr, ComparisonExpr, IndexExpr, UnaryExpr, YieldFromExpr
)
TYPE_EMPTY = 0
TYPE_PRECISE = 1
TYPE_IMPRECISE = 2
TYPE_ANY = 3
precision_names = [
'empty',
'precise',
'imprecise',
'any',
]
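# Clarifying note (added for illustration, not part of the original module):
# the TYPE_* codes above index into precision_names, e.g.
# precision_names[TYPE_IMPRECISE] == 'imprecise'; record_line() below keeps
# the worst (highest) code seen for each source line.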
class StatisticsVisitor(TraverserVisitor):
def __init__(self, inferred: bool, typemap: Dict[Node, Type] = None,
all_nodes: bool = False) -> None:
self.inferred = inferred
self.typemap = typemap
self.all_nodes = all_nodes
self.num_precise = 0
self.num_imprecise = 0
self.num_any = 0
self.num_simple = 0
self.num_generic = 0
self.num_tuple = 0
self.num_function = 0
self.num_typevar = 0
self.num_complex = 0
self.line = -1
self.line_map = {} # type: Dict[int, int]
self.output = [] # type: List[str]
TraverserVisitor.__init__(self)
def visit_func_def(self, o: FuncDef) -> None:
self.line = o.line
if len(o.expanded) > 1:
if o in o.expanded:
print('ERROR: cycle in function expansion; skipping')
return
for defn in o.expanded:
self.visit_func_def(cast(FuncDef, defn))
else:
if o.type:
sig = cast(CallableType, o.type)
arg_types = sig.arg_types
if (sig.arg_names and sig.arg_names[0] == 'self' and
not self.inferred):
arg_types = arg_types[1:]
for arg in arg_types:
self.type(arg)
self.type(sig.ret_type)
elif self.all_nodes:
self.record_line(self.line, TYPE_ANY)
super().visit_func_def(o)
def visit_type_application(self, o: TypeApplication) -> None:
self.line = o.line
for t in o.types:
self.type(t)
super().visit_type_application(o)
def visit_assignment_stmt(self, o: AssignmentStmt) -> None:
self.line = o.line
if (isinstance(o.rvalue, nodes.CallExpr) and
isinstance(cast(nodes.CallExpr, o.rvalue).analyzed,
nodes.TypeVarExpr)):
# Type variable definition -- not a real assignment.
return
if o.type:
self.type(o.type)
elif self.inferred:
for lvalue in o.lvalues:
if isinstance(lvalue, nodes.TupleExpr):
items = lvalue.items
elif isinstance(lvalue, nodes.ListExpr):
items = lvalue.items
else:
items = [lvalue]
for item in items:
if hasattr(item, 'is_def') and cast(Any, item).is_def:
t = self.typemap.get(item)
if t:
self.type(t)
else:
self.log(' !! No inferred type on line %d' %
self.line)
self.record_line(self.line, TYPE_ANY)
super().visit_assignment_stmt(o)
def visit_name_expr(self, o: NameExpr) -> None:
self.process_node(o)
super().visit_name_expr(o)
def visit_yield_from_expr(self, o: YieldFromExpr) -> None:
if o.expr:
o.expr.accept(self)
def visit_call_expr(self, o: CallExpr) -> None:
self.process_node(o)
if o.analyzed:
o.analyzed.accept(self)
else:
o.callee.accept(self)
for a in o.args:
a.accept(self)
def visit_member_expr(self, o: MemberExpr) -> None:
self.process_node(o)
super().visit_member_expr(o)
def visit_op_expr(self, o: OpExpr) -> None:
self.process_node(o)
super().visit_op_expr(o)
def visit_comparison_expr(self, o: ComparisonExpr) -> None:
self.process_node(o)
super().visit_comparison_expr(o)
def visit_index_expr(self, o: IndexExpr) -> None:
self.process_node(o)
super().visit_index_expr(o)
def visit_unary_expr(self, o: UnaryExpr) -> None:
self.process_node(o)
super().visit_unary_expr(o)
def process_node(self, node: Node) -> None:
if self.all_nodes:
typ = self.typemap.get(node)
if typ:
self.line = node.line
self.type(typ)
def type(self, t: Type) -> None:
if isinstance(t, AnyType):
self.log(' !! Any type around line %d' % self.line)
self.num_any += 1
self.record_line(self.line, TYPE_ANY)
elif ((not self.all_nodes and is_imprecise(t)) or
(self.all_nodes and is_imprecise2(t))):
self.log(' !! Imprecise type around line %d' % self.line)
self.num_imprecise += 1
self.record_line(self.line, TYPE_IMPRECISE)
else:
self.num_precise += 1
self.record_line(self.line, TYPE_PRECISE)
if isinstance(t, Instance):
if t.args:
if any(is_complex(arg) for arg in t.args):
self.num_complex += 1
else:
self.num_generic += 1
else:
self.num_simple += 1
elif isinstance(t, Void):
self.num_simple += 1
elif isinstance(t, FunctionLike):
self.num_function += 1
elif isinstance(t, TupleType):
if any(is_complex(item) for item in t.items):
self.num_complex += 1
else:
self.num_tuple += 1
elif isinstance(t, TypeVarType):
self.num_typevar += 1
def log(self, string: str) -> None:
self.output.append(string)
def record_line(self, line: int, precision: int) -> None:
self.line_map[line] = max(precision,
self.line_map.get(line, TYPE_PRECISE))
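# Added note (not part of the original module): record_line() keeps the "worst"
# precision seen for a line.  This relies on the ordering assumption
# TYPE_PRECISE < TYPE_IMPRECISE < TYPE_ANY, so a line that mixes precise and
# Any-typed expressions ends up flagged with the Any colour in the HTML report:
#
#     visitor.record_line(10, TYPE_PRECISE)    # line_map[10] == TYPE_PRECISE
#     visitor.record_line(10, TYPE_ANY)        # line_map[10] == TYPE_ANY
#     visitor.record_line(10, TYPE_IMPRECISE)  # still TYPE_ANY (max keeps worst)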
def dump_type_stats(tree: Node, path: str, inferred: bool = False,
typemap: Dict[Node, Type] = None) -> None:
if is_special_module(path):
return
print(path)
visitor = StatisticsVisitor(inferred, typemap)
tree.accept(visitor)
for line in visitor.output:
print(line)
print(' ** precision **')
print(' precise ', visitor.num_precise)
print(' imprecise', visitor.num_imprecise)
print(' any ', visitor.num_any)
print(' ** kinds **')
print(' simple ', visitor.num_simple)
print(' generic ', visitor.num_generic)
print(' function ', visitor.num_function)
print(' tuple ', visitor.num_tuple)
print(' TypeVar ', visitor.num_typevar)
print(' complex ', visitor.num_complex)
print(' any ', visitor.num_any)
def is_special_module(path: str) -> bool:
return os.path.basename(path) in ('abc.py', 'typing.py', 'builtins.py')
def is_imprecise(t: Type) -> bool:
return t.accept(HasAnyQuery())
class HasAnyQuery(TypeQuery):
def __init__(self) -> None:
super().__init__(False, ANY_TYPE_STRATEGY)
def visit_any(self, t: AnyType) -> bool:
return True
def visit_instance(self, t: Instance) -> bool:
if t.type.fullname() == 'builtins.tuple':
return True
else:
return super().visit_instance(t)
def is_imprecise2(t: Type) -> bool:
return t.accept(HasAnyQuery2())
class HasAnyQuery2(HasAnyQuery):
def visit_callable_type(self, t: CallableType) -> bool:
# We don't want to flag references to functions with some Any
# argument types (etc.) since they generally don't mean trouble.
return False
def is_generic(t: Type) -> bool:
return isinstance(t, Instance) and bool(cast(Instance, t).args)
def is_complex(t: Type) -> bool:
return is_generic(t) or isinstance(t, (FunctionLike, TupleType,
TypeVarType))
html_files = [] # type: List[Tuple[str, str, int, int]]
def generate_html_report(tree: Node, path: str, type_map: Dict[Node, Type],
output_dir: str) -> None:
if is_special_module(path):
return
# There may be more than one right answer for "what should we do here?"
# but this is a reasonable one.
path = os.path.relpath(path)
if path.startswith('..'):
return
visitor = StatisticsVisitor(inferred=True, typemap=type_map, all_nodes=True)
tree.accept(visitor)
assert not os.path.isabs(path) and not path.startswith('..')
# This line is *wrong* if the preceding assert fails.
target_path = os.path.join(output_dir, 'html', path)
# replace .py or .pyi with .html
target_path = os.path.splitext(target_path)[0] + '.html'
assert target_path.endswith('.html')
ensure_dir_exists(os.path.dirname(target_path))
output = [] # type: List[str]
append = output.append
append('''\
<html>
<head>
<style>
.red { background-color: #faa; }
.yellow { background-color: #ffa; }
.white { }
.lineno { color: #999; }
</style>
</head>
<body>
<pre>''')
num_imprecise_lines = 0
num_lines = 0
with open(path) as input_file:
for i, line in enumerate(input_file):
lineno = i + 1
status = visitor.line_map.get(lineno, TYPE_PRECISE)
style_map = {TYPE_PRECISE: 'white',
TYPE_IMPRECISE: 'yellow',
TYPE_ANY: 'red'}
style = style_map[status]
append('<span class="lineno">%4d</span> ' % lineno +
'<span class="%s">%s</span>' % (style,
cgi.escape(line)))
if status != TYPE_PRECISE:
num_imprecise_lines += 1
if line.strip():
num_lines += 1
append('</pre>')
append('</body></html>')
with open(target_path, 'w') as output_file:
output_file.writelines(output)
target_path = target_path[len(output_dir) + 1:]
html_files.append((path, target_path, num_lines, num_imprecise_lines))
def generate_html_index(output_dir: str) -> None:
path = os.path.join(output_dir, 'index.html')
output = [] # type: List[str]
append = output.append
append('''\
<html>
<head>
<style>
body { font-family: courier; }
table { border-collapse: collapse; }
table tr td { border: 1px solid black; }
td { padding: 0.4em; }
.red { background-color: #faa; }
.yellow { background-color: #ffa; }
</style>
</head>
<body>''')
append('<h1>Mypy Type Check Coverage Report</h1>\n')
append('<table>\n')
for source_path, target_path, num_lines, num_imprecise in sorted(html_files):
if num_lines == 0:
continue
source_path = os.path.normpath(source_path)
# TODO: Windows paths.
if (source_path.startswith('stubs/') or
'/stubs/' in source_path):
continue
percent = 100.0 * num_imprecise / num_lines
style = ''
if percent >= 20:
style = 'class="red"'
elif percent >= 5:
style = 'class="yellow"'
append('<tr %s><td><a href="%s">%s</a><td>%.1f%% imprecise<td>%d LOC\n' % (
style, target_path, source_path, percent, num_lines))
append('</table>\n')
append('</body></html>')
with open(path, 'w') as file:
file.writelines(output)
print('Generated HTML report (old): %s' % os.path.abspath(path))
def ensure_dir_exists(dir: str) -> None:
if not os.path.exists(dir):
os.makedirs(dir)
|
py | 1a493e74d0ae1f2791b7605b4790489b992313b0 | #025: Write a program that reads a person's name and says whether it has "SILVA" in it.
nome = str(input('Enter your name: '))
nome = nome.title()
nome = nome.strip()
nomeA = nome.split()
if ('Silva' in nome):
    print('Your name has Silva in it!')
else:
    print('Your name does not have Silva in it!')
|
py | 1a493f1188506ebd0301c8b3461398680a04c8d5 | """String functions in R"""
import re
import numpy as np
from pipda import register_func
from ..core.backends import pandas as pd
from ..core.backends.pandas import Series
from ..core.backends.pandas.core.base import PandasObject
from ..core.backends.pandas.core.groupby import SeriesGroupBy
from ..core.backends.pandas.api.types import is_string_dtype, is_scalar
from ..core.tibble import Tibble, TibbleGrouped, TibbleRowwise
from ..core.contexts import Context
from ..core.factory import func_factory, dispatching
from ..core.utils import (
arg_match,
logger,
regcall,
)
from .casting import _as_type
from .testing import _register_type_testing
from .logical import as_logical
@register_func(None, context=Context.EVAL)
def as_character(
x,
str_dtype=str,
_na=np.nan,
):
"""Convert an object or elements of an iterable into string
Aliases `as_str` and `as_string`
Args:
x: The object
str_dtype: The string dtype to convert to
        _na: How NAs should be cast. Specifying np.nan will keep them unchanged.
But the dtype will be object then.
Returns:
When x is an array or a series, return x.astype(str).
When x is iterable, convert elements of it into strings
Otherwise, convert x to string.
"""
return _as_type(x, str_dtype, na=_na)
as_str = as_string = as_character
is_character = _register_type_testing(
"is_character",
scalar_types=(str, np.str_),
dtype_checker=is_string_dtype,
doc="""Test if a value is characters/string
Alias `is_str` and `is_string`
Args:
x: The value to be checked
Returns:
True if the value is string; with a string dtype;
or all elements are strings
""",
)
is_str = is_string = is_character
# Grep family -----------------------------------
@dispatching(kind="transform", qualname="datar.base.grep")
def _grep(
x, pattern, ignore_case=False, value=False, fixed=False, invert=False
):
matched = _grepl.dispatch(Series)(
x,
pattern,
ignore_case=ignore_case,
fixed=fixed,
invert=invert,
)
if value:
return x[matched]
return np.flatnonzero(matched)
@register_func(None, context=Context.EVAL)
def grep(
pattern,
x,
ignore_case=False,
value=False,
fixed=False,
invert=False,
):
"""R's grep, get the element in x matching the pattern
Args:
pattern: The pattern
        x: A string or an iterable of strings, or values that can be coerced to strings
ignore_case: Do case-insensitive matching?
value: Return values instead of indices?
fixed: Fixed matching (instead of regex matching)?
        invert: Return elements that don't match instead?
Returns:
The matched (or unmatched (`invert=True`)) indices
(or values (`value=True`)).
"""
return _grep(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
ignore_case=ignore_case,
value=value,
fixed=fixed,
invert=invert,
)
@dispatching(kind="transform", qualname="datar.base.grepl")
def _grepl(x, pattern, ignore_case, fixed, invert):
pattern = _warn_more_pat_or_rep(pattern, "grepl")
return _match(
x,
pattern,
ignore_case=ignore_case,
invert=invert,
fixed=fixed,
)
@register_func(None, context=Context.EVAL)
def grepl(
pattern,
x,
ignore_case=False,
fixed=False,
invert=False,
):
"""R's grepl, check whether elements in x matching the pattern
Args:
pattern: The pattern
        x: A string or an iterable of strings, or values that can be coerced to strings
ignore_case: Do case-insensitive matching?
fixed: Fixed matching (instead of regex matching)?
        invert: Return elements that don't match instead?
Returns:
A bool array indicating whether the elements in x match the pattern
"""
return _grepl(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
ignore_case=ignore_case,
fixed=fixed,
invert=invert,
)
@dispatching(kind="transform", qualname="datar.base.sub")
def _sub(x, pattern, replacement, ignore_case, fixed):
return _sub_(
pattern=pattern,
replacement=replacement,
x=x,
ignore_case=ignore_case,
fixed=fixed,
)
@register_func(None, context=Context.EVAL)
def sub(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
):
"""R's sub, replace a pattern with replacement for elements in x,
each only once
Args:
pattern: The pattern
replacement: The replacement
        x: A string or an iterable of strings, or values that can be coerced to strings
ignore_case: Do case-insensitive matching?
fixed: Fixed matching (instead of regex matching)?
Returns:
An array of strings with matched parts replaced.
"""
return _sub(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
replacement,
ignore_case=ignore_case,
fixed=fixed,
)
@dispatching(kind="transform", qualname="datar.base.gsub")
def _gsub(x, pattern, replacement, ignore_case, fixed):
return _sub_(
pattern=pattern,
replacement=replacement,
x=x,
ignore_case=ignore_case,
fixed=fixed,
count=0,
fun="gsub",
)
@register_func(None, context=Context.EVAL)
def gsub(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
):
"""R's gsub, replace a pattern with replacement for elements in x,
each for all matched parts
See Also:
[sub()](datar.base.string.sub)
"""
return _gsub(
x if isinstance(x, (Series, SeriesGroupBy)) else Series(x),
pattern,
replacement,
ignore_case=ignore_case,
fixed=fixed,
)
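# Added usage sketch (not part of the original module): how the grep family
# defined above behaves when called directly on plain lists.  The exact return
# container (numpy array vs. pandas Series) depends on the dispatching above,
# so the results shown are conceptual only.
#
#   grep("an", ["apple", "banana", "cherry"])               # indices -> [1]
#   grep("an", ["apple", "banana", "cherry"], value=True)   # values  -> ["banana"]
#   grepl("an", ["apple", "banana", "cherry"])              # -> [False, True, False]
#   sub("an", "AN", ["banana"])                             # first match -> ["bANana"]
#   gsub("an", "AN", ["banana"])                            # all matches -> ["bANANa"]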
# Grep family helpers --------------------------------
def _warn_more_pat_or_rep(pattern, fun, arg="pattern"):
"""Warn when there are more than one pattern or replacement provided"""
if is_scalar(pattern):
return pattern
if len(pattern) == 1:
return pattern[0]
logger.warning(
"In %s(...), argument `%s` has length > 1 and only the "
"first element will be used",
fun,
arg,
)
return pattern[0]
def _match(text, pattern, ignore_case, invert, fixed):
"""Do the regex match"""
if pd.isnull(text):
return False
flags = re.IGNORECASE if ignore_case else 0
if fixed:
pattern = re.escape(pattern)
pattern = re.compile(pattern, flags)
matched = pattern.search(text)
if invert:
matched = not bool(matched)
return bool(matched)
_match = np.vectorize(_match, excluded={"pattern"})
def _sub_(
pattern,
replacement,
x,
ignore_case=False,
fixed=False,
count=1,
fun="sub",
):
"""Replace a pattern with replacement for elements in x,
with argument count available
"""
pattern = _warn_more_pat_or_rep(pattern, fun)
replacement = _warn_more_pat_or_rep(replacement, fun, "replacement")
if fixed:
pattern = re.escape(pattern)
flags = re.IGNORECASE if ignore_case else 0
pattern = re.compile(pattern, flags)
return pattern.sub(repl=replacement, count=count, string=x)
_sub_ = np.vectorize(_sub_, excluded={"pattern", "replacement"})
@func_factory("transform", "x")
def nchar(
x,
type="chars",
allow_na=True, # i.e.: '\ud861'
keep_na=None,
_na_len=2,
):
"""Get the size of the elements in x"""
x, keep_na = _prepare_nchar(x, type, keep_na)
return _nchar_scalar(
x, retn=type, allow_na=allow_na, keep_na=keep_na, na_len=_na_len
)
@func_factory("transform", "x")
def nzchar(x, keep_na=False):
"""Find out if elements of a character vector are non-empty strings.
Args:
x: Strings to test
keep_na: What to return when for NA's
Returns:
A bool array to tell whether elements in x are non-empty strings
"""
x = regcall(as_character, x, _na=np.nan if keep_na else "")
if not keep_na:
return x.fillna(False).astype(bool)
return as_logical(x, na=np.nan)
# nchar helpers --------------------------------
def _prepare_nchar(x, type, keep_na):
"""Prepare arguments for n(z)char"""
arg_match(type, "type", ["chars", "bytes", "width"])
if keep_na is None:
keep_na = type != "width"
return regcall(as_character, x), keep_na
@np.vectorize
def _nchar_scalar(x, retn, allow_na, keep_na, na_len):
"""Get the size of a scalar string"""
if pd.isnull(x):
return np.nan if keep_na else na_len
if retn == "width":
try:
from wcwidth import wcswidth
except ImportError as imperr: # pragma: no cover
raise ValueError(
"`nchar(x, type='width')` requires `wcwidth` package.\n"
"Try: pip install -U datar[wcwidth]"
) from imperr
return wcswidth(x)
if retn == "chars":
return len(x)
try:
x = x.encode("utf-8")
except UnicodeEncodeError:
if allow_na:
return np.nan
raise
return len(x)
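# Added usage sketch (not part of the original module): the three `type` modes
# of nchar() above differ for non-ASCII input; results are conceptual and the
# "width" mode additionally needs the optional wcwidth dependency.
#
#   nchar(["abc", "日本語"])                 # characters -> [3, 3]
#   nchar(["abc", "日本語"], type="bytes")   # utf-8 bytes -> [3, 9]
#   nchar(["abc", "日本語"], type="width")   # display width -> [3, 6]
#   nzchar(["abc", ""])                      # non-empty? -> [True, False]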
# paste and paste0 --------------------
_is_empty = lambda x: (
(is_scalar(x) and not x) or (not is_scalar(x) and len(x) == 0)
)
@register_func(None, context=Context.EVAL)
def paste(*args, sep=" ", collapse=None):
"""Concatenate vectors after converting to character.
Args:
*args: strings to be concatenated
sep: The separator
collapse: The separator to collapse the final string arrays
Returns:
A single string if collapse is given, otherwise an array of strings.
"""
if len(args) == 1 and isinstance(args[0], TibbleRowwise):
out = args[0].apply(
lambda row: row.astype(str).str.cat(sep=sep), axis=1
)
return collapse.join(out) if collapse else out
from ..tibble import tibble
if all(_is_empty(arg) for arg in args):
df = tibble(*args, _name_repair="minimal")
else:
df = tibble(
*("" if _is_empty(arg) else arg for arg in args),
_name_repair="minimal",
)
if not isinstance(df, TibbleGrouped):
out = df.apply(lambda col: col.astype(str).str.cat(sep=sep), axis=1)
if collapse:
return collapse.join(out)
if any(isinstance(x, PandasObject) for x in args):
return out
return np.array(out, dtype=object)
out = df.apply(
lambda row: row.astype(str).str.cat(sep=sep), axis=1
).groupby(df._datar["grouped"].grouper)
if collapse:
out = out.agg(lambda x: x.str.cat(sep=collapse))
return out
@register_func(None, context=Context.EVAL)
def paste0(*args, sep="", collapse=None):
"""Paste with empty string as sep"""
return regcall(paste, *args, sep="", collapse=collapse)
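# Added usage sketch (not part of the original module): paste()/paste0() recycle
# scalar arguments against vector arguments and optionally collapse the result
# into a single string; results are conceptual.
#
#   paste("a", ["x", "y"], sep="-")                  # -> ["a-x", "a-y"]
#   paste("a", ["x", "y"], sep="-", collapse=",")    # -> "a-x,a-y"
#   paste0("a", ["x", "y"])                          # -> ["ax", "ay"]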
# sprintf ----------------------------------------------------------------
@register_func(None, context=Context.EVAL)
def sprintf(fmt, *args):
"""C-style String Formatting
Args:
fmt: The formats
*args: The values
Returns:
A scalar string if all fmt, *args are scalar strings, otherwise
an array of formatted strings
"""
if is_scalar(fmt) and all(is_scalar(x) for x in args):
if pd.isnull(fmt):
return np.nan
return fmt % args
from ..tibble import tibble
df = tibble(fmt, *args, _name_repair="minimal")
aggfunc = lambda row: (
np.nan
if pd.isnull(row.values[0])
else row.values[0] % tuple(row.values[1:])
)
if isinstance(df, TibbleGrouped):
return Tibble(df, copy=False).agg(aggfunc, axis=1).groupby(
df._datar["grouped"].grouper
)
return df.agg(aggfunc, axis=1)
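# Added usage sketch (not part of the original module): sprintf() falls back to
# plain `%` formatting for scalars and recycles the format over vector values;
# results are conceptual.
#
#   sprintf("%s has %d items", "cart", 3)   # -> "cart has 3 items"
#   sprintf("%d%%", [10, 25])               # recycled -> ["10%", "25%"]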
# substr, substring ----------------------------------
@func_factory("transform", "x")
def substr(x, start, stop):
"""Extract substrings in strings.
Args:
x: The strings
start: The start positions to extract
stop: The stop positions to extract
Returns:
The substrings from `x`
"""
x = regcall(as_character, x)
return x.str[start:stop]
@func_factory("transform", "x")
def substring(x, first, last=1000000):
"""Extract substrings in strings.
Args:
x: The strings
        first: The start positions to extract
        last: The stop positions to extract
Returns:
The substrings from `x`
"""
x = regcall(as_character, x)
return x.str[first:last]
# strsplit --------------------------------
@func_factory("transform", {"x", "split"})
def strsplit(x, split, fixed=False):
"""Split strings by separator
Args:
x: The strings. Have to be strings, no casting will be done.
split: The separators to split
fixed: fixed matching (instead of regex matching)?
Returns:
List of split strings of x if both x and split are scalars. Otherwise,
an array of split strings
"""
def split_str(string, sep):
if fixed:
return string.split(sep)
sep = re.compile(sep)
return sep.split(string)
return np.vectorize(split_str, [object])(x, split)
# startsWith, endsWith
@func_factory("transform", "x")
def startswith(x, prefix):
"""Determines if entries of x start with prefix
Args:
x: A vector of strings or a string
prefix: The prefix to test against
Returns:
A bool vector for each element in x if element startswith the prefix
"""
x = regcall(as_character, x)
return x.str.startswith(prefix)
@func_factory("transform", "x")
def endswith(x, suffix):
"""Determines if entries of x end with suffix
Args:
x: A vector of strings or a string
suffix: The suffix to test against
Returns:
A bool vector for each element in x if element endswith the suffix
"""
x = regcall(as_character, x)
return x.str.endswith(suffix)
@func_factory("transform", "x")
def strtoi(x, base=0):
"""Convert strings to integers according to the given base
Args:
x: A string or vector of strings
base: an integer which is between 2 and 36 inclusive, or zero.
With zero, a suitable base will be chosen following the C rules.
Returns:
Converted integers
"""
return x.transform(int, base=base)
@func_factory("transform", "x")
def chartr(old, new, x):
"""Replace strings char by char
Args:
x: A string or vector of strings
old: A set of characters to replace
new: A set of characters to replace with
Returns:
The strings in x being replaced
"""
old = _warn_more_pat_or_rep(old, "chartr", "old")
new = _warn_more_pat_or_rep(new, "chartr", "new")
if len(old) > len(new):
raise ValueError("'old' is longer than 'new'")
new = new[: len(old)]
for oldc, newc in zip(old, new):
x = x.str.replace(oldc, newc)
return x
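# Added usage sketch (not part of the original module): strtoi() delegates to
# Python's int(value, base) and chartr() replaces characters one by one;
# results are conceptual.
#
#   strtoi(["10", "0x1A", "0o10"])     # base=0 auto-detects the prefix -> [10, 26, 8]
#   strtoi(["ff", "10"], base=16)      # -> [255, 16]
#   chartr("ab", "AB", ["abcab"])      # -> ["ABcAB"]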
@func_factory("transform", "x")
def tolower(x):
"""Convert strings to lower case
Args:
x: A string or vector of strings
Returns:
Converted strings
"""
x = regcall(as_character, x)
return x.str.lower()
@func_factory("transform", "x")
def toupper(x):
"""Convert strings to upper case
Args:
x: A string or vector of strings
Returns:
Converted strings
"""
x = regcall(as_character, x)
return x.str.upper()
@func_factory("transform", "x")
def trimws(x, which="both", whitespace=r"[ \t\r\n]"):
"""Remove leading and/or trailing whitespace from character strings.
Args:
x: A string or vector of strings
which: A character string specifying whether to remove
both leading and trailing whitespace (default),
or only leading ("left") or trailing ("right").
whitespace: a string specifying a regular expression to
match (one character of) “white space”
Returns:
The strings with whitespaces removed
"""
which = arg_match(which, "which", ["both", "left", "right"])
x = regcall(as_character, x)
if which == "both":
expr = f"^{whitespace}|{whitespace}$"
elif which == "left":
expr = f"^{whitespace}"
else:
expr = f"{whitespace}$"
return np.vectorize(re.sub, excluded={"pattern", "repl"})(expr, "", x)
|
py | 1a49410b58605f266658f970701eb4ca7180786f |
from oud.models import Participant
from django.contrib import admin
# Register your models here.
admin.site.register(Participant) |
py | 1a4941216b7666ed7f5683638ec1b76b821dccec | #!/usr/bin/env python
"""Basic pipeline building blocks.
This module provides the basic building blocks in a JIP pipeline and a way
to search and find them at run-time. The basic building blocks are instances
of :py:class:`Tool`. The JIP library comes with two sub-classes that can be
used to create tool implementations:
:py:class:`ScriptTool`
This sub-class of `Tool` integrates file or script based tool
implementations which can be served from stand-alone script files
:py:class:`PythonTool`
    In contrast to the script tool, this `Tool` extension allows you to create
    `Tool` instances from other, possibly non-related, python classes. The
    easiest way to use this is with the :py:class:`jip.tools.tool` decorator,
which allows you to take arbitrary python classes and *make* them jip
tools.
In addition to the `Tool` implementations, this module provides the
:py:class:`Scanner` class, which is used to find tool implementations either
from disk or from an arbitrary python module. This class is supposed to be
used as a *singleton* and a configured instance is available in the main
`jip` module, exposed as `jip.scanner`. The scanner class itself is
configured either through the :py:mod:`jip.configuration`, or through
environment variables. The :py:class:`Scanner` documentation covers both
the environment variables that can be used and the configuration
properties.
"""
import copy
import inspect
from textwrap import dedent
from os import remove, getcwd, getenv, listdir
from os.path import exists, basename, dirname, abspath
import os
import sys
import types
import shutil
import base64
import jip.templates
from jip.options import Options, TYPE_OUTPUT, TYPE_INPUT, Option
from jip.templates import render_template, set_global_context
from jip.utils import list_dir
from jip.logger import getLogger
from jip.six import iteritems, string_types, PY3, PY2
from jip.six.moves import cPickle
import jip.profiles
from io import IOBase
log = getLogger('jip.tools')
# the pickle template to store a python tool
_pickel_template = """
python -c '
import sys
try:
import pickle
except ImportError:
import cPickle as pickle
import jip
import jip.tools
import types
import base64
jip._disable_module_search = True
source=base64.b64decode("".join([l for l in sys.stdin]))
data = pickle.loads(source)
deco = jip.tools.tool()
tool = jip.tools.PythonTool(
data["instance"],
deco
)
tool._options = data["options"]
if isinstance(tool, types.FunctionType):
tool()
else:
tool.run()
'<< __EOF__
%s__EOF__
"""
#########################################################
# Exceptions
#########################################################
class ValidationError(Exception):
"""Exception raised in validation steps. The exception
carries the source tool and a message.
"""
def __init__(self, source, message):
self.source = source
self.message = message
def __repr__(self):
import jip.cli
if self.source:
return "%s: %s" % (
jip.cli.colorize(self.source, jip.cli.RED),
jip.cli.colorize(self.message, jip.cli.BLUE)
)
else:
return "%s" % (
jip.cli.colorize(self.message, jip.cli.RED)
)
def __str__(self):
return self.__repr__()
class ToolNotFoundException(Exception):
"""Raised in case a tool is not found by the scanner"""
pass
#########################################################
# decorators
#########################################################
class tool(object):
"""Decorate functions and classes and convert them to tools.
The @jip.tool decorator turns classes and functions into valid JIP
tools. The simplest way to use this decorator is to annotate a python
function that returns a string. This string is then interpreted as a
JIP script template. The functions docstring is used, similar to
JIP scripts, to parse command line options and tool input and
output parameters. For example::
@tool()
def mytool():
'''
Send a greeting
usage:
mytool <name>
'''
            return 'echo "hello ${name}"'
    This creates a single *bash* interpreted script and exposes a tool,
    `mytool`, into the JIP environment. You can use the decorator's
arguments to further customize the tool specification, i.e. specify
a different name. If you want to use a different interpreter, you can
return a tuple where the first element is the interpreter name and the
second is the script template.
:param name: specify a tool name. If no name is specified, the name
of the decorated function or class is used as the tool
name
:param inputs: specify a list of option names that are treated
as input options
:param outputs: specify a list of option names that are treated as output
options
:param argparse: specify the name of the function or a function reference
that take an ``ArgumentParser`` instance and populates
it. This takes precedence over the doc string if the
function exists.
:param get_command: name of the function or a function reference that
implements the tools ``get_command`` function
:param validate: name of the function or a function reference that
implements the tools ``validate`` function
:param setup: name of the function or a function reference that
implements the tools ``setup`` function
:param init: name of the function or a function reference that
implements the tools ``init`` function
:param run: name of the function or a function reference that
implements the tools ``run`` function
:param pipeline: name of the function or a function reference that
implements the tools ``pipeline`` function
:param is_done: name of the function or a function reference that
implements the tools ``is_done`` function
:param cleanup: name of the function or a function reference that
implements the tools ``cleanup`` function
:param help: name of the function or a function reference that
implements the tools ``help`` function
:param add_outputs: takes a list of values to add hidden output
options
:param check_files: takes a list of option names that will be passed
through file checks on validation
"""
def __init__(self, name=None, inputs=None, outputs=None,
argparse='register', get_command='get_command',
validate='validate',
setup='setup',
init='init',
run='run',
pipeline='pipeline',
is_done='is_done',
cleanup='cleanup',
help='help',
add_outputs=None,
check_files=None,
ensure=None,
pytool=False,
force_pipeline=False):
self.name = name
self.inputs = inputs
self.outputs = outputs
self.argparse = argparse
self.add_outputs = add_outputs
self._check_files = check_files
self._ensure = ensure
self._pytool = pytool
self._force_pipeline = force_pipeline
################################################################
# tool delegates
################################################################
self._validate = validate if validate else "validate"
self._setup = setup if setup else "setup"
self._init = init if init else "init"
self._is_done = is_done if is_done else "is_done"
self._pipeline = pipeline if pipeline else "pipeline"
self._get_command = get_command if get_command else "get_command"
self._cleanup = cleanup if cleanup else "cleanup"
self._help = help if help else "help"
self._run = run if run else "run"
def __call__(self, *args):
cls = args[0]
log.debug("Decorated tool or pipeline: %s", cls)
# check the name
if self.name is None:
if isinstance(cls, types.FunctionType) and PY2:
self.name = cls.func_name
else:
self.name = cls.__name__
# overwrite the string representation
is_class = False
if not isinstance(cls, types.FunctionType):
cls.__repr__ = lambda x: self.name
is_class = True
if is_class:
old = None
if hasattr(cls, '__setattr__'):
old = cls.__setattr__
def setatr(slf, name, value):
ov = slf.__dict__.get(name, None)
if ov is not None and isinstance(ov, Option):
ov.set(value)
else:
if old:
old(slf, name, value)
else:
if name in slf.__dict__:
slf.__dict__[name] = value
else:
raise AttributeError()
cls.__setattr__ = setatr
tool_instance = PythonTool(cls, self, self.add_outputs)
Scanner.registry[self.name] = tool_instance
log.debug("Registered tool from module: %s", self.name)
return cls
################################################################
# tool delegates
################################################################
def _update_delegate(self, wrapper, instance):
# helper function to expose a name function directly
def set_name(name):
# set the job name
wrapper.job.name = name
# inject helper functions
helper_function = {
"name": set_name,
"job": wrapper.job,
"profile": wrapper.job,
"add_output": wrapper.options.add_output,
"add_input": wrapper.options.add_input,
"add_option": wrapper.options.add_option,
'r': render_template,
'render_template': render_template,
'options': wrapper.options,
'opts': wrapper.options,
'args': wrapper.args,
'ensure': wrapper.ensure,
'check_file': wrapper.check_file,
'validation_error': wrapper.validation_error
}
for k, v in iteritems(helper_function):
if not hasattr(instance, k):
instance.__dict__[k] = v
# inject options if they don't exists
for o in wrapper.options:
if not hasattr(instance, o.name):
instance.__dict__[o.name] = o
def __call_delegate(self, fun, wrapper, instance):
if not callable(fun):
name = fun
try:
fun = getattr(instance, name)
except:
# don't double validate, the python tool will call the
# Tool validate already
if name == 'validate':
return
                # try to get the function from the main Tool implementation
fun = getattr(Tool, name)
if fun:
# make sure the instance is aware of the options
if (hasattr(fun, "__self__") and fun.__self__ is not None) or \
(hasattr(fun, "im_self") and fun.im_self is not None):
self._update_delegate(wrapper, instance)
# force options and args
instance.options = wrapper.options
instance.opts = wrapper.options
instance.args = wrapper.args
return fun()
else:
# function based implementation
self._update_delegate(wrapper, wrapper)
return fun(wrapper)
def validate(self, wrapper, instance):
try:
r = self.__call_delegate(self._validate, wrapper, instance)
if self._check_files:
for check in self._check_files:
wrapper.check_file(check)
if self._ensure:
for e in self._ensure:
wrapper.ensure(e[0], e[1], None if len(e) < 3 else e[2])
return r
except Exception as err:
if not isinstance(err, ValidationError):
log.debug("Validation error: %s", str(err).strip())
err = ValidationError(wrapper, str(err))
raise err
def setup(self, wrapper, instance):
return self.__call_delegate(self._setup, wrapper, instance)
def init(self, wrapper, instance):
return self.__call_delegate(self._init, wrapper, instance)
def is_done(self, wrapper, instance):
return self.__call_delegate(self._is_done, wrapper, instance)
def pipeline(self, wrapper, instance):
return self.__call_delegate(self._pipeline, wrapper, instance)
def get_command(self, wrapper, instance):
interp = "bash"
cmd = None
if not self._pytool and not isinstance(instance, types.FunctionType):
cmds = self.__call_delegate(self._get_command, wrapper,
instance)
else:
if self._pytool:
# this is a python tool that wrapps a class or function.
# In order to get a single command, we pickle the
# wrapped instance and the options and then push it
# through the pickel template
data = {
"instance": instance,
"options": wrapper.options
}
r = ('bash', _pickel_template %
(base64.b64encode(cPickle.dumps(data))))
return r
else:
# this is not a python tool function but a function
# that will return a template
argspec = inspect.getargspec(instance)
if len(argspec[0]) > 0:
cmds = instance(wrapper)
else:
cmds = instance()
if isinstance(cmds, (list, tuple)):
interp = cmds[0]
cmd = cmds[1]
else:
cmd = cmds
if interp and cmd:
block = Block(content=cmd, interpreter=interp)
return interp, block.render(wrapper)
return None, None
def cleanup(self, wrapper, instance):
return self.__call_delegate(self._cleanup, wrapper, instance)
def run(self, wrapper, instance):
return self.__call_delegate(self._run, wrapper, instance)
def help(self, wrapper, instance):
return self.__call_delegate(self._help, wrapper, instance)
class pytool(tool):
"""This is a decorator that can be used to mark single python functions
as tools. The function will be wrapped in a PythonTool instance and
    the function must accept a single parameter ``self`` to access the tool's
    options.
"""
def __init__(self, *args, **kwargs):
kwargs['pytool'] = True
tool.__init__(self, *args, **kwargs)
class pipeline(tool):
"""This is a decorator that can be used to mark single python functions
as pipelines.
"""
def __init__(self, *args, **kwargs):
kwargs['force_pipeline'] = True
tool.__init__(self, *args, **kwargs)
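# Added usage sketch (not part of the original module): besides the function
# form shown in the decorator docstring above, @tool() can also wrap a class
# whose methods match the delegate names (validate, get_command, ...).  The
# tool and option names below are purely illustrative.
#
#   @tool()
#   class greet(object):
#       """
#       usage:
#           greet <name>
#       """
#       def validate(self):
#           self.ensure('name', r'^\w+$', 'name must be a single word')
#
#       def get_command(self):
#           return 'bash', 'echo "hello ${name}"'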
class Scanner():
"""
    This class holds a script/tool cache.
    The cache is organized into dicts; the script_cache, for example, stores
    name->instance pairs pointing from the name of the tool
    to its cached instance. The find implementations will return
clones of the instances in the cache.
"""
registry = {}
def __init__(self, jip_path=None, jip_modules=None):
self.initialized = False
self.instances = {}
self.jip_path = jip_path if jip_path else ""
self.jip_modules = jip_modules if jip_modules else []
self.jip_file_paths = set([])
self.__scanned = False
self.__scanned_files = None
def find(self, name, path=None, is_pipeline=False):
"""Finds a tool by its name or file name.
If the given name points to an existing file, the file is loaded
        as a script tool and returned. Otherwise, a default search is
triggered, optionally including the specified path.
:returns: a new instance of the tool
:rtype: :class:`Tool`
:raises ToolNotFoundException: if the tool could not be found
"""
if name is None:
return None
s = name.split(" ", 1)
args = None
if len(s) > 1:
import shlex
name = s[0]
args = shlex.split(s[1])
if exists(name) and os.path.isfile(name):
            ## the passed argument is a file. Try to load it as a
            ## script and add the file's directory to the search path
tool = ScriptTool.from_file(name, is_pipeline=is_pipeline)
self._register_tool(name, tool)
self.jip_file_paths.add(dirname(name))
clone = tool.clone()
clone.init()
if args:
log.debug("Scanner | Parsing arguments passed "
"through tool name")
clone.parse_args(args)
return clone
if not self.initialized:
self.scan()
self.initialized = True
self.instances.update(Scanner.registry)
tool = self.instances.get(name, None)
if tool is None:
tool = self.instances.get(name + ".jip", None)
if tool is None:
raise ToolNotFoundException("No tool named '%s' found!" % name)
if isinstance(tool, string_types):
## the tool is not loaded, load the script,
## and add it to the cache
tool = ScriptTool.from_file(tool, is_pipeline=is_pipeline)
self._register_tool(name, tool)
log.debug("Scanner | Cloning tool %s [%s]", tool, tool.__hash__())
clone = tool.clone()
clone.init()
if args:
log.debug("Scanner | Parsing arguments passed through tool name")
clone.parse_args(args)
return clone
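    # Added usage sketch (not part of the original module): find() is normally
    # used through the configured singleton exposed as `jip.scanner` (see the
    # module docstring).  The tool/script names below are purely illustrative.
    #
    #   import jip
    #   t = jip.scanner.find("bash")              # lookup by registered name
    #   t = jip.scanner.find("mytool.jip")        # load a script file directly
    #   t = jip.scanner.find("mytool -i in.txt")  # inline args parsed via shlex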
def scan(self, path=None):
"""Searches for scripts and python modules in the configured
locations and returns a dictionary of the detected instances
:param path: optional path value to define a folder to scan
:returns: dict of tools
"""
log.debug("Searching for JIP tools")
if self.instances is None:
self.instances = {}
self.scan_files(parent=path)
self.scan_modules()
for n, m in iteritems(Scanner.registry):
self._register_tool(n, m)
return self.instances
def _register_tool(self, name, tool):
self.instances[name] = tool
# check and load profile for the given tool
if tool.path:
spec_file = tool.path
# replace extension with .spec
try:
i = spec_file.rindex(".")
if i >= 0:
spec_file = spec_file[:i] + ".spec"
log.debug("Checking for spec file at: %s", spec_file)
if os.path.exists(spec_file):
log.info("Loading spec for %s from %s",
name, spec_file)
profile = jip.profiles.Profile.from_file(spec_file)
tool._job = profile
except Exception as err:
log.error("Error while loading spec for %s: %s", name, err,
exc_info=True)
def scan_files(self, parent=None):
"""Scan files for jip tools. This functions detects files with
the ``.jip`` extension in the default search locations.
:param parent: optional parent folder
:returns: list of found files
"""
if parent is None and self.__scanned_files is not None:
return self.__scanned_files
import re
pattern = re.compile(r'^.*(.jip)$')
files = {}
if parent:
for path in self.__search(parent, pattern):
self.instances[basename(path)] = path
files[basename(path)] = path
#check cwd
for path in self.__search(getcwd(), pattern, False):
self.instances[basename(path)] = path
files[basename(path)] = path
jip_path = "%s:%s" % (self.jip_path, getenv("JIP_PATH", ""))
for folder in jip_path.split(":") + list(self.jip_file_paths):
for path in self.__search(folder, pattern):
self.instances[basename(path)] = path
files[basename(path)] = path
if parent is None:
self.__scanned_files = files
return files
def __search(self, folder, pattern, recursive=True):
log.debug("Searching folder: %s", folder)
for path in list_dir(folder, recursive=recursive):
if pattern.match(path) and os.path.isfile(path):
log.debug("Found tool: %s", path)
yield path
def add_module(self, path):
"""Add a module or a python file to the list of module that are
scanned for tools.
:param: path to the module that will be added to the search path
"""
self.jip_modules.append(path)
self.__scanned = False
def add_folder(self, path):
"""Add a folder to the list of folders that are
scanned for tools.
:param: path to the folder that will be added to the search path
"""
self.jip_file_paths.add(path)
self.__scanned = False
self.__scanned_files = None
self.initialized = False
def scan_modules(self):
"""Loads the python modules specified in the JIP configuration.
This will register any functions and classes decorated with
one of the JIP decorators.
"""
if self.__scanned:
return
path = getenv("JIP_MODULES", "")
log.debug("Scanning modules")
for module in path.split(":") + self.jip_modules + ['jip.scripts']:
try:
if module:
log.debug("Importing module: %s", module)
__import__(module)
except ImportError as e:
log.debug("Error while importing module: %s. "
"Trying file import", str(e))
if exists(module):
self._load_from_file(module)
self.__scanned = True
def _load_from_file(self, path):
"""Try to load a module from the given file. No module is loaded
        if the file does not exist. Otherwise, a full module name is guessed
        by checking for __init__.py files upwards, and the module is then
        imported from its parent directory.
:param path: the path to the module file
"""
if not exists(path):
return
name, parent_dir = self._guess_module_name(path)
log.debug("Importing module from file: %s %s %s", name, path,
parent_dir)
sys.path.insert(0, parent_dir)
mod = __import__(name)
log.debug("Imported module from file %s : %s", path, mod)
#imp.load_source(name, path)
def _guess_module_name(self, path):
"""Guess the absolute module name for the given file by checking for
__init__.py files in the current folder structure and upwards"""
path = abspath(path)
base = basename(path)
if base.endswith('.py'):
base = base[:-3]
name = [base]
def _load_package_name(current, module_name):
if '__init__.py' in listdir(current):
module_name.append(basename(current))
return _load_package_name(dirname(current), module_name)
return module_name, current
# check if this is in a package
name, parent_dir = _load_package_name(dirname(path), name)
name.reverse()
return ".".join(name), parent_dir
class Block(object):
"""Base class for executable blocks that can render themselves to scripts
and provide information about the interpreter that should be used to
run the script.
"""
def __init__(self, content=None, interpreter=None, interpreter_args=None,
lineno=0):
self._lineno = lineno
self.interpreter = interpreter
self._process = None
self.content = content
if self.content is None:
self.content = []
self.interpreter_args = interpreter_args
def run(self, tool, stdin=None, stdout=None):
"""Execute this block
"""
import subprocess
import jip
# write template to named temp file and run with interpreter
script_file = jip.create_temp_file()
try:
script_file.write(self.render(tool))
script_file.close()
cmd = [self.interpreter if self.interpreter else "bash"]
if self.interpreter_args:
cmd += self.interpreter_args
self.process = subprocess.Popen(
cmd + [script_file.name],
stdin=stdin,
stdout=stdout
)
return self.process
except OSError as err:
# catch the errno 2 No such file or directory, which indicates the
# interpreter is not available
if err.errno == 2:
raise Exception("Interpreter %s not found!" % self.interpreter)
raise err
def render(self, tool):
"""Renders this blocks content within the context of the given tool
:param tool: the tool
:returns: rendered block content
:rtype: string
"""
content = self.content
if isinstance(content, (list, tuple)):
content = "\n".join(content)
ctx = dict(tool.options.to_dict(raw=True))
ctx['tool'] = tool
ctx['__file__'] = tool.path
ctx['args'] = tool.options.to_dict()
ctx['options'] = tool.options.to_cmd
return render_template(content, **ctx)
def terminate(self):
"""
Terminate currently running blocks
"""
if self._process is not None:
if self._process._popen is not None:
self._process.terminate()
import time
# sleep and check job states a few times before we do a hard
# kill
for t in [0.01, 0.05, 0.10, 2, 3]:
time.sleep(t)
if not self.process.is_alive():
break
if self.process.is_alive():
# kill it
import os
import signal
os.kill(self.process._popen.pid, signal.SIGKILL)
def __str__(self):
return "Block['%s']" % self.interpreter
class PythonBlockUtils(object):
"""Utility functions that are exposed in template blocks and template
functions
The block utilities store a reference to the *local* and *global*
environment, to the current *tool* and to the current *pipeline*.
"""
def __init__(self, tool, local_env):
self.tool = tool
self._pipeline = None
self._local_env = local_env
self._global_env = None
if hasattr(tool, "_pipeline"):
self._pipeline = tool._pipeline
@property
def pipeline(self):
from jip import Pipeline
if self._pipeline is None:
self._pipeline = Pipeline()
self._pipeline._utils = self
return self._pipeline
def check_file(self, name):
"""Checks for the existence of a file referenced by an options.
Please note that this doe **not** take a file name, but the name
of an option. This function is preferred over a simple check
using ``os.path.exists()`` because it also checks for job dependencies.
This is important because a mandatory file might not *yet* exist
within the context of a pipeline, but it will be created at runtime
in a previous step.
:param name: the options name
:returns: True if the file exists or the file is created by another
job that will run before this options job is executed.
:rtype: boolean
"""
opt = self.tool.options[name]
if not opt.is_dependency():
self.tool.options[name].validate()
def validation_error(self, message, *args):
"""Quickly raise a validation error with a custom message.
This function simply raises a ValidationError. You can use it
in a custom validation implementation to quickly fail the validation
:param message: the message
:param args: argument interpolated into the message
:raises ValidationError: always
"""
raise ValidationError(self.tool, message % args)
def set(self, name, value):
"""Set an options value.
:param name: the options name
:type name: string
:param value: the new value
"""
self.tool.options[name].value = value
def run(self, _name, **kwargs):
"""Searches for a tool with the specified name and adds it as a
new :py:class:`~jip.pipelines.Node` to the current pipeline.
All specified keyword argument are passed as option values to
the tool.
Delegates to the pipelines :py:meth:`jip.pipelines.Pipeline.run`
method.
:param _name: the name of the tool
:type _name: string
:param kwargs: additional argument passed to the tool as options
:returns: a new node that executes the specified tool and is added
to the current pipeline
:rtype: :py:class:`jip.pipelines.Node`
"""
return self.pipeline.run(_name, **kwargs)
def job(self, *args, **kwargs):
"""Create and returns a new :class:`~jip.pipelines.Job`.
The job instance can be used to customize the execution environment
for *the next* job. For example::
job("Test", threads=2).run('mytool', ...)
This is a typical usage in a pipeline context, where a new job
environment is created and then applied to a new 'mytool' pipeline
node.
:param args: job arguments
:param kwargs: job keyword arguments
:returns: a new job instance
:rtype: :class:`jip.pipelines.Job`
"""
return self.pipeline.job(*args, **kwargs)
def name(self, name):
"""Set the runtime name of a pipeline.
The runtime name of the pipeline is stored in the database and is
used as a general identifier for a pipeline run.
        **Note** that this sets the name of the *pipeline* if used in a pipeline
        context, otherwise it sets the name of the tool/job.
        Within a pipeline context, the name can also be changed using a :py:func:`job`::
job("my job").run(...)
or after the node was created:
myrun = run(...)
myrun.job.name = "my job"
:param name: the name of the pipeline
:type name: string
"""
self.tool._job.name = name
def bash(self, command, **kwargs):
"""Create a *bash* job that executes a bash command.
        This is a fast way to build pipelines that execute shell commands. The
        function wraps the given command string in the *bash tool* that
is defined with ``input``, ``output``, and ``outfile``. Input and
output default to stdin and stdout. Note that you can access your
local context within the command string. Take for example the following
pipeline script::
name = "Joe"
bash("echo 'Hello ${name}'")
This will work as expected. The command template can access local
variables. Please keep in mind that the tools context takes precedence
over the script context. That means that::
input="myfile.txt"
bash("wc -l ${input}")
in this example, the command ``wc -l`` will be rendered and wait for
input on stdin. The bash command has an ``input`` option and that takes
precedence before the globally defined ``input`` variable. This is true
for ``input``, ``output``, and ``outfile``, even if they are not
explicitly set.
You can however access variables defined in the global context using
the `_ctx`::
input="myfile.txt"
bash("wc -l ${_ctx.input}")
will indeed render and execute ``wc -l myfile.txt``.
:param command: the bash command to execute
:type command: string
:param kwargs: arguments passed into the context used to render the
bash command. ``input``, ``output``, and ``outfile`` are
passed as options to the *bash* tool that is used to
run the command
:returns: a new pipeline node that represents the bash job
:rtype: :class:`jip.pipelines.Node`
"""
bash_node = self.pipeline.run('bash', cmd=command, **kwargs)
return bash_node
def _update_global_env(self, env):
if not self._global_env:
self._global_env = {}
self._global_env.update(env)
def _update_context(self, ctx, kwargs=None, base_node=None):
if self._global_env:
for k, v in iteritems(self._global_env):
if k not in ctx:
ctx[k] = v
if kwargs:
ctx.update(kwargs)
## update all Nodes with their default output options
if base_node is not None:
from jip.pipelines import Node
class OptionWrapper(object):
def __init__(self, node, option):
self.node = node
self.option = option
def __str__(self):
if base_node != self.node:
base_node.depends_on(self.node)
if self.option.option_type != jip.options.TYPE_OPTION:
log.debug("Adding additional input option "
"for node %s : %s",
base_node, self.option.name)
self.node._tool.options.make_absolute(
self.node._job.working_dir
)
base_node._additional_input_options.add(
self.option
)
return str(self.option)
def __getattr__(self, name):
# check that the option exists (Issue #43)
opt = self.node._tool.options[name]
if opt is None:
log.info("Option '%s' not found in %s",
name, self.node, exc_info=True)
raise ValidationError(
self.node,
"Option '%s' not found in node '%s'" % (
name, self.node
)
)
return OptionWrapper(
self.node, self.node._tool.options[name]
)
for k in ctx.keys():
v = ctx[k]
if isinstance(v, Node):
try:
ctx[k] = OptionWrapper(
v,
v._tool.options.get_default_output()
)
except LookupError:
# no default output option
pass
return ctx
class PythonBlock(Block):
"""Extends block and runs the content as embedded python
"""
def __init__(self, content=None, lineno=0):
Block.__init__(self, content=content, lineno=lineno)
self.interpreter = "__embedded__"
def run(self, tool, stdin=None, stdout=None):
"""Execute this block as an embedded python script
"""
log.debug("Block: run python block for: %s", tool)
#tmpl = self.render(tool)
content = self.content
if isinstance(content, (list, tuple)):
content = "\n".join(content)
local_env = locals()
utils = PythonBlockUtils(tool, local_env)
profile = jip.profiles.Profile()
if hasattr(tool, '_job'):
profile = tool._job
env = {
"tool": tool,
"args": tool.options.to_dict(),
"opts": tool.options,
"options": tool.options,
"check_file": utils.check_file,
"ensure": tool.ensure,
"run": utils.run,
"validation_error": utils.validation_error,
"bash": utils.bash,
"job": utils.job,
"name": utils.name,
"add_output": tool.options.add_output,
"add_input": tool.options.add_input,
"add_option": tool.options.add_option,
"set": utils.set,
'r': render_template,
'render_template': render_template,
'utils': utils,
'profile': profile,
'basename': basename,
'dirname': dirname,
'abspath': abspath,
'pwd': getcwd(),
'exists': exists,
'__file__': tool.path if tool.path else None
}
# link known tools into the context
from jip import scanner
from functools import partial
scanner.scan_modules()
for name, cls in iteritems(scanner.registry):
if not name in env:
env[name] = partial(utils.run, name)
for name, path in iteritems(scanner.scan_files()):
k = name
if k.endswith(".jip"):
k = k[:-4]
if not k in env:
env[k] = partial(utils.run, name)
# link options to context
for o in tool.options:
if not o.name in env:
n = o.name.replace("-", "_").replace(" ", "_")
env[n] = o
utils._global_env = env
old_global_context = jip.templates.global_context
set_global_context(env)
try:
env.update(local_env)
exec(content, env)
except Exception as e:
if hasattr(e, 'lineno'):
e.lineno += self._lineno
raise
# auto naming for tools
from jip.pipelines import Node
for k, v in iteritems(env):
if isinstance(v, Node):
if v._job.name is None:
v._job.name = k
# reset index
log.debug("Block: block for: %s executed", tool)
return env
def terminate(self):
"""The terminate function on a python block does nothing. A
Python block can not be terminated directly"""
pass
def __str__(self):
return "PythonBlock"
class Tool(object):
"""The base class for all implementation of executable units.
This class provides all the building block to integrated new tool
implementations that can be executed, submitted and integrated in pipelines
to construct more complex setups.
    A `Tool` in a JIP setup is considered to be a container for the execution's
    meta-data, i.e. options and files that are needed for the actual run. The
    main function of the `Tool` class is its :py:meth:`get_command`
function, which returns a tuple `(interpreter, command)`, where the
`interpreter` is a string like "bash" or "perl" or even a *path* to some
interpreter executable that will be used to execute the `command`. The
command itself is the string representation of the content of a script that
will be passed to the `interpreter` at execution time. Please note that
    the :py:meth:`get_command` function's command part is supposed to be
fully *rendered*, it will not be modified any further. The JIP default
tool classes that are used, for example, to provide script to the system,
are already integrated with the :py:mod:`jip.templates` system, but you can
easily use the rendering function directly to create more dynamic commands
that can adopt easily to changed in the configuration of a tool.
The class exposes a name and a path to a source file as properties. Both
are optional and can be omitted in order to implement anonymous tools. In
addition to these *meta* data, the tools :py:meth:`__init__` function
allows you to provide a *options_source*. This object is used to create the
:py:class:`jip.options.Options` that cover the runtime configuration of a
    tool. The options are initialized lazily on first access using the
`options_source` provided at initialization time. This object can be either
a string or an instance of an `argparse.ArgumentParser`. Both styles of
providing tool options are described in the :py:mod:`jip.options` module.
"""
def __init__(self, options_source=None, name=None):
"""Initialize a tool instance. If no options_source is given
        the class docstring is used as the options source.
:param options_source: either a string or an argparser instance
defaults to the class docstring
:param name: the name of this tool
"""
#: the tools name
self._name = name
#: path to the tools source file
self.path = None
self._options = None
self._options_source = options_source
self._job = None
self._is_pipeline = False
def setup(self):
"""Setup method that can be implemented to manipulate tool options
before rendering and validation. Note that options here might still
        contain template strings. You are also allowed to set option values
to template strings.
:raises Exception: in case of a critical error
"""
pass
def init(self):
"""Initialization method that can be implemented to initialize the tool
instance and, for example, add options. ``init`` is called once for
the tool instance and the logic within the ``init`` is not allowed to
rely on any values set or applied to the tool.
:raises Exception: in case of a critical error
"""
pass
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def job(self):
if self._job is None:
self._job = jip.profiles.Profile()
return self._job
def profile(self):
return self.job
@property
def options(self):
"""Access this tools :py:class:`jip.options.Options` instance.
The tools options are the main way to interact with and configure a
tool instance either from outside or from within a pipeline.
"""
if self._options is None:
if self._options_source is not None:
self._options = self._parse_options(self._options_source)
return self._options
@property
def args(self):
"""Returns a dictionary from the option names to the option values
"""
return self.options.to_dict()
def parse_args(self, args):
"""Parses the given argument. An excetion is raised if
an error ocurres during argument parsing
:param args: the argument list
:type args: list of strings
"""
self.options.parse(args)
def _parse_options(self, options_source, inputs=None, outputs=None):
"""Initialize the options from the docstring or an argparser.
In addition to the options, the function tries to deduce a tool
name if none was specified at construction time.
Optional inputs and outputs lists can be specified. Both must
be lists of strings containing option names. If the option is found
        the option type is set to input or output accordingly. This is
        useful if the options are not organized in groups and the
        parser cannot automatically identify the option's type.
        :param options_source: either a docstring or an argparser instance
:type options_source: string or argparse.ArgumentParser
:param inputs: list of option names that will be marked as inputs
:type inputs: list of strings
:param outputs: list of option names that will be marked as outputs
:type outputs: list of strings
"""
if options_source is None:
raise Exception("No docstring or argument parser provided!")
opts = None
if not isinstance(options_source, string_types):
opts = Options.from_argparse(options_source, source=self,
inputs=inputs, outputs=outputs)
else:
opts = Options.from_docopt(options_source, source=self,
inputs=inputs, outputs=outputs)
if self.name is None:
import re
match = re.match(r'usage:\s*\n*(\w+).*', opts.usage(),
re.IGNORECASE | re.MULTILINE)
if match:
self.name = match.groups()[0]
return opts
def validate(self):
"""The default implementation validates all options that belong to
this tool and checks that all options that are of `TYPE_INPUT`
reference existing files.
The method raises a :py:class:`ValidationError` in case an option could
not be validated or an input file does not exist.
"""
log.debug("Default options validation for %s", self)
try:
self.options.validate()
except Exception as e:
log.debug("Validation error: %s", str(e).strip())
raise ValidationError(self, str(e))
for opt in self.options.get_by_type(TYPE_INPUT):
if opt.source is not None and opt.source != self:
continue
if opt.is_dependency():
continue
for value in opt._value:
if isinstance(value, string_types):
if not exists(value):
raise ValidationError(self,
"Input file not found: %s" %
value)
def validation_error(self, message, *args):
"""Quickly raise a validation error with a custom message.
This function simply raises a ValidationError. You can use it
in a custom validation implementation to quickly fail the validation
:param message: the message
:param args: argument interpolated into the message
:raises ValidationError: always
"""
raise ValidationError(self, message % args)
def ensure(self, option_name, check, message=None):
"""Check a given option value using the check pattern or function and
raise a ValidationError in case the pattern does not match or the
function does return False.
In case of list values, please note that in case check is a pattern,
all values are checked independently. If check is a function, the
list is passed on as is if the option takes list values, otherwise,
the check function is called for each value independently.
Note also that you should not use this function to check for file
existence. Use the `check_file()` function on the option or on the
tool instead. `check_file` checks for incoming dependencies in
pipelines, in which case the file does not exist _yet_ but it
will be created by a parent job.
:param option_name: the name of the option to check
        :param check: either a string that is interpreted as a regexp pattern
                      or a function that takes the option's value as a single
                      parameter and returns True if the value is valid
"""
o = self.options[option_name]
if isinstance(check, string_types):
            # regexp pattern
import re
for v in o.value:
if not re.match(check, str(v)):
self.validation_error(
message if message else "check failed for %s" % str(v)
)
return
elif callable(check):
if o.nargs == 0 or o.nargs == 1:
for v in o.value:
if not check(v):
self.validation_error(
message if message
else "check failed for %s" % str(v)
)
else:
if not check(o.value):
self.validation_error(
message if message else "check failed for %s" % o.name
)
return
raise Exception("Ensure check paramter has to be a "
"function or a pattern")
def check_file(self, option_name):
"""Delegates to the options check name function
:param option_name: the name of the option
"""
try:
self.options[option_name].check_file()
except ValueError as e:
self.validation_error(str(e))
def is_done(self):
"""The default implementation return true if the tools has output
files and all output files exist.
"""
outfiles = set(self.get_output_files())
if len(outfiles) == 0:
return False
for outfile in outfiles:
if not exists(outfile):
return False
return True
def pipeline(self):
"""Create and return the pipeline that will run this tool"""
return None
def get_command(self):
"""Return a tuple of (template, interpreter) where the template is
a string that will be rendered and the interpreter is a name of
an interpreter that will be used to run the filled template.
"""
return "bash", _pickel_template % \
(cPickle.dumps(self).encode("base64"))
def cleanup(self):
"""The celanup method removes all output files for this tool"""
outfiles = list(self.get_output_files(sticky=False))
log.debug("Tool cleanup check files: %s", outfiles)
for outfile in outfiles:
if exists(outfile):
log.warning("Tool cleanup! Removing: %s", outfile)
if os.path.isfile(outfile):
remove(outfile)
elif os.path.isdir(outfile):
shutil.rmtree(outfile)
def get_output_files(self, sticky=True):
"""Yields a list of all output files for the options
of this tool. Only TYPE_OUTPUT options are considered
whose values are strings. If a source for the option
is not None, it has to be equal to this tool.
If `sticky` is set to False, all options marked with the
sticky flag are ignored
:param sticky: by default all output option values are returned,
if this is set to False, only non-sticky output
                       options are yielded
:type sticky: boolean
:returns: list of file names
"""
for opt in self.options.get_by_type(TYPE_OUTPUT):
if (opt.source and opt.source != self) or \
(not sticky and opt.sticky):
continue
values = opt.value
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
if isinstance(value, string_types):
import glob
globbed = glob.glob(value)
if globbed:
for v in globbed:
yield v
else:
yield value
def get_input_files(self):
"""Yields a list of all input files for the options
of this tool. Only TYPE_INPUT options are considered
whose values are strings. If a source for the option
is not None, it has to be equal to this tool.
:returns: list of file names
"""
for opt in self.options.get_by_type(TYPE_INPUT):
if opt.source and opt.source != self:
continue
values = opt.raw()
if not isinstance(values, (list, tuple)):
values = [values]
for value in values:
if isinstance(value, string_types):
yield value
def help(self):
"""Return help for this tool. By default this delegates
to the options help.
"""
return dedent(self.options.help())
def __repr__(self):
return self.name if self.name else "<Unknown>"
def __str__(self):
return self.__repr__()
def clone(self, counter=None):
"""Clones this instance of the tool and returns the clone. If the
        optional counter is provided, the name of the cloned tool will be
updated using .counter as a suffix.
"""
cloned_tool = copy.copy(self)
cloned_tool._options = self.options.copy()
if cloned_tool.name and counter is not None:
cloned_tool.name = "%s.%d" % (cloned_tool.name, str(counter))
cloned_tool._options._help = self.options._help
cloned_tool._options._usage = self.options._usage
# update the options source
cloned_tool._options.source = cloned_tool
for o in cloned_tool._options:
o.source = cloned_tool
log.debug("Tool | cloned instance %s [%s->%s]",
self, self.__hash__(), cloned_tool.__hash__())
return cloned_tool
class PythonTool(Tool):
"""An extension of the tool class that is initialized
with a decorated class to simplify the process of implementing
Tools in python.
"""
def __init__(self, cls, decorator, add_outputs=None):
"""Initialize a new python tool
:param cls: the wrapped class
:type cls: class
:param decorator: an instance of the :class:`jip.tool` decorator
:type decorator: jip.tool
:param add_outputs: list of additional names that will be added
to the list of output options
"""
Tool.__init__(self)
self.decorator = decorator
self.cls = cls
self.name = decorator.name
try:
if not isinstance(cls, types.FunctionType):
self.instance = cls()
else:
self.instance = cls
except:
self.instance = cls
try:
self.path = inspect.getsourcefile(cls)
except:
log.debug("Unable to find source file for %s", self.name)
################################################################
        # Load options either through an argparser function that was
# specified by name in the decorator or load them from the
# docstring of the instance
################################################################
self._options_source = None
self._add_outputs = add_outputs
self._is_pipeline = decorator._force_pipeline
def clone(self, counter=None):
cloned_tool = Tool.clone(self, counter=counter)
try:
if not isinstance(self.cls, types.FunctionType):
cloned_tool.instance = self.cls()
else:
cloned_tool.instance = self.cls
except:
cloned_tool.instance = self.cls
return cloned_tool
@property
def options(self):
if self._options is not None:
return self._options
if self.decorator.argparse and hasattr(self.instance,
self.decorator.argparse):
            # initialize the options from argparse
import argparse
class PrintDefaultsFormatter(argparse.HelpFormatter):
def _get_help_string(self, action):
help = action.help
if '%(default)' not in action.help and \
'(default: ' not in action.help:
if action.default is not argparse.SUPPRESS:
defaulting_nargs = [argparse.OPTIONAL,
argparse.ZERO_OR_MORE]
if action.option_strings or \
action.nargs in defaulting_nargs:
if isinstance(action.default, IOBase):
if action.default == sys.stdout:
help += ' (default: stdout)'
elif action.default == sys.stdin:
help += ' (default: stdin)'
elif action.default == sys.stderr:
help += ' (default: stderr)'
else:
help += ' (default: <stream>)'
else:
help += ' (default: %(default)s)'
return help
self._options_source = argparse.ArgumentParser(
prog=self.name,
formatter_class=PrintDefaultsFormatter
)
init_parser = getattr(self.instance, self.decorator.argparse)
init_parser(self._options_source)
else:
# initialize options from doc string
import textwrap
if self.instance.__doc__ is not None:
self._options_source = textwrap.dedent(self.instance.__doc__)
else:
self._options_source = ""
# create the options
self._options = self._parse_options(self._options_source,
inputs=self.decorator.inputs,
outputs=self.decorator.outputs)
## add additional output arguments
if self._add_outputs is not None:
for arg in self._add_outputs:
if isinstance(arg, (list, tuple)):
# get default value
arg = arg[0]
self._options.add(Option(
arg,
option_type=TYPE_OUTPUT,
nargs=1,
hidden=True
))
return self._options
def run(self):
self.instance.options = self.options
self.instance.tool_instance = self
if isinstance(self.instance, types.FunctionType):
# check if the function takes a parameter
argspec = inspect.getargspec(self.instance)
if len(argspec[0]) > 0:
self.instance(self)
else:
self.instance()
else:
self.decorator.run(self, self.instance)
def validate(self):
r = self.decorator.validate(self, self.instance)
Tool.validate(self)
return r
def setup(self):
return self.decorator.setup(self, self.instance)
def init(self):
if self._add_outputs is not None:
for arg in self._add_outputs:
if isinstance(arg, (list, tuple)):
value = arg[1]
arg = arg[0]
if callable(value):
try:
value = value(self)
except Exception as err:
log.debug("Error evaluating output value: %s",
str(err), exc_info=True)
self.options[arg].set(value)
return self.decorator.init(self, self.instance)
def is_done(self):
return self.decorator.is_done(self, self.instance)
def pipeline(self):
if self.decorator._force_pipeline and isinstance(self.instance,
types.FunctionType):
# force pipeline generation. Call the instance function
            # and check if the returned value is a pipeline or a string
# strings go into a pipeline block for evaluation, pipelines
# are returned unmodified
            # check if the function takes a parameter
argspec = inspect.getargspec(self.instance)
r = None
if len(argspec[0]) > 0:
r = self.instance(self)
else:
r = self.instance()
if isinstance(r, string_types):
# create a pipeline block and evaluate it
block = PythonBlock(r)
e = block.run(self)
return e['utils']._pipeline
else:
return r
return self.decorator.pipeline(self, self.instance)
def help(self):
return self.decorator.help(self, self.instance)
def cleanup(self):
return self.decorator.cleanup(self, self.instance)
def get_command(self):
return self.decorator.get_command(self, self.instance)
class ScriptTool(Tool):
"""An extension of the tool class that is initialized
    with a docstring and operates on Blocks that can be loaded
    from a script file or from a string.
If specified as initializer parameters, both the validation and the
pipeline block will be handled with special care.
    Pipeline blocks currently can only be embedded python blocks. Therefore
the interpreter has to be 'python'. Validation blocks where the
interpreter is 'python' will be converted to embedded python blocks. This
allows the validation process to modify the tool and its arguments during
validation.
"""
def __init__(self, docstring, command_block=None, setup_block=None,
init_block=None, validation_block=None, pipeline_block=None):
Tool.__init__(self, docstring)
self.command_block = command_block
self.validation_block = validation_block
self.pipeline_block = pipeline_block
self.setup_block = setup_block
self.init_block = init_block
if self.pipeline_block:
if self.pipeline_block.interpreter is not None and \
self.pipeline_block.interpreter != 'python':
raise Exception("Pipeline blocks have to be implemented in "
"python! Sorry about that, but its really a "
"nice language :)")
self.pipeline_block = PythonBlock(
lineno=self.pipeline_block._lineno,
content=self.pipeline_block.content
)
if self.validation_block and \
(self.validation_block.interpreter is None or
self.validation_block.interpreter == 'python'):
self.validation_block = PythonBlock(
lineno=self.validation_block._lineno,
content=self.validation_block.content
)
if self.setup_block:
self.setup_block = PythonBlock(
lineno=self.setup_block._lineno,
content=self.setup_block.content
)
if self.init_block:
self.init_block = PythonBlock(
lineno=self.init_block._lineno,
content=self.init_block.content
)
if not self.command_block and not self.pipeline_block:
raise Exception("No executable or pipeline block found!")
self._is_pipeline = self.pipeline_block is not None
def pipeline(self):
if self.pipeline_block:
r = self.pipeline_block.run(self)
return r['utils'].pipeline
return Tool.pipeline(self)
def run(self):
if self.command_block:
self.command_block.run(self)
def validate(self):
if self.validation_block:
self.validation_block.run(self)
Tool.validate(self)
def init(self):
if self.init_block:
self.init_block.run(self)
Tool.init(self)
def setup(self):
if self.setup_block:
self.setup_block.run(self)
Tool.setup(self)
def get_command(self):
if self.command_block:
return self.command_block.interpreter, \
self.command_block.render(self)
return None, None
@classmethod
def from_string(cls, content):
from jip.parser import load
return load(content, script_class=cls)
@classmethod
def from_file(cls, path, is_pipeline=False):
log.debug("Load script from file: %s", path)
from jip.parser import loads
s = loads(path, script_class=cls, is_pipeline=is_pipeline)
return s
|
py | 1a4947212c87de77ddc74c6741997be66fcc0725 | #!/usr/bin/python -i
# Copyright (c)2010-2014 the Boeing Company.
# See the LICENSE file included in this distribution.
# Example CORE Python script that attaches N nodes to an EMANE 802.11abg
# network. One of the parameters is changed, the pathloss mode.
import sys, datetime, optparse
from core import pycore
from core.misc import ipaddr
from core.constants import *
from core.emane.ieee80211abg import EmaneIeee80211abgModel
# node list (count from 1)
n = [None]
def main():
usagestr = "usage: %prog [-h] [options] [args]"
parser = optparse.OptionParser(usage = usagestr)
parser.set_defaults(numnodes = 5)
parser.add_option("-n", "--numnodes", dest = "numnodes", type = int,
help = "number of nodes")
def usage(msg = None, err = 0):
sys.stdout.write("\n")
if msg:
sys.stdout.write(msg + "\n\n")
parser.print_help()
sys.exit(err)
# parse command line options
(options, args) = parser.parse_args()
if options.numnodes < 1:
usage("invalid number of nodes: %s" % options.numnodes)
for a in args:
sys.stderr.write("ignoring command line argument: '%s'\n" % a)
start = datetime.datetime.now()
# IP subnet
prefix = ipaddr.IPv4Prefix("10.83.0.0/16")
# session with some EMANE initialization
cfg = {'verbose': 'false'}
session = pycore.Session(cfg = cfg, persistent = True)
session.master = True
session.location.setrefgeo(47.57917,-122.13232,2.00000)
session.location.refscale = 150.0
session.cfg['emane_models'] = "RfPipe, Ieee80211abg, Bypass"
session.emane.loadmodels()
if 'server' in globals():
server.addsession(session)
# EMANE WLAN
print "creating EMANE WLAN wlan1"
wlan = session.addobj(cls = pycore.nodes.EmaneNode, name = "wlan1")
wlan.setposition(x=80,y=50)
names = EmaneIeee80211abgModel.getnames()
values = list(EmaneIeee80211abgModel.getdefaultvalues())
# TODO: change any of the EMANE 802.11 parameter values here
for i in range(0, len(names)):
print "EMANE 80211 \"%s\" = \"%s\"" % (names[i], values[i])
try:
values[ names.index('pathlossmode') ] = '2ray'
except ValueError:
values[ names.index('propagationmodel') ] = '2ray'
session.emane.setconfig(wlan.objid, EmaneIeee80211abgModel._name, values)
services_str = "zebra|OSPFv3MDR|IPForward"
print "creating %d nodes with addresses from %s" % \
(options.numnodes, prefix)
for i in xrange(1, options.numnodes + 1):
tmp = session.addobj(cls = pycore.nodes.CoreNode, name = "n%d" % i,
objid=i)
tmp.newnetif(wlan, ["%s/%s" % (prefix.addr(i), prefix.prefixlen)])
tmp.cmd([SYSCTL_BIN, "net.ipv4.icmp_echo_ignore_broadcasts=0"])
tmp.setposition(x=150*i,y=150)
session.services.addservicestonode(tmp, "", services_str, verbose=False)
n.append(tmp)
# this starts EMANE, etc.
session.node_count = str(options.numnodes + 1)
session.instantiate()
# start a shell on node 1
n[1].term("bash")
print "elapsed time: %s" % (datetime.datetime.now() - start)
if __name__ == "__main__" or __name__ == "__builtin__":
main()
|
py | 1a494771c6b7e5611772eafe17cf4c29074e5d44 | """Gitlab service support.
API docs: https://docs.gitlab.com/ee/api/
"""
from dateutil.parser import parse as parsetime
from snakeoil.klass import aliased, alias
from urllib.parse import urlparse, urlunparse, quote_plus
from ._jsonrest import JsonREST
from ..exceptions import RequestError, BiteError
from ..objects import Item, Attachment, Comment, TimeInterval
from ._reqs import LinkHeaderPagedRequest, PagedRequest, ParseRequest, req_cmd
from ._rest import RESTRequest
class GitlabError(RequestError):
def __init__(self, msg, code=None, text=None):
msg = 'Gitlab error: ' + msg
super().__init__(msg, code, text)
class GitlabIssue(Item):
attributes = {
'created': 'Created',
'updated': 'Modified',
}
attribute_aliases = {
'title': 'summary',
'creator': 'author',
'owner': 'assignee',
}
_print_fields = (
('summary', 'Title'),
('assignee', 'Assignee'),
('id', 'ID'),
)
type = 'issue'
def __init__(self, repo=None, comments=None, attachments=None, **kw):
for k, v in kw.items():
# Prefix project ID to issue iid depending on the connection type.
# The 'id' field unique across all issues is essentially useless
# for us since most API calls only use project IDs and iids.
# https://docs.gitlab.com/ee/api/README.html#id-vs-iid
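            # For example (made-up values): project_id=123 and iid=7 are
            # combined into the id "123-7" when no single repo is bound.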
if k == 'id':
continue
elif k == 'iid':
k = 'id'
if repo is None:
v = f"{kw['project_id']}-{v}"
elif k in ('created_at', 'updated_at', 'closed_at') and v:
v = parsetime(v)
elif k in ('author', 'assignee') and v:
v = v['username']
setattr(self, k, v)
self.attachments = attachments if attachments is not None else ()
self.comments = comments if comments is not None else ()
class GitlabComment(Comment):
pass
class GitlabAttachment(Attachment):
pass
class GitlabProject(object):
def __init__(self, **kw):
self.id = kw['id']
self.desc = kw['description']
self.owner, self.name = kw['path_with_namespace'].split('/', 1)
self.created = parsetime(kw['created_at'])
self.updated = parsetime(kw['last_activity_at'])
self.git_repo = kw['http_url_to_repo']
self.webbase = kw['web_url']
self.tags = tuple(kw['tag_list'])
self.stars = kw['star_count']
self.forks = kw['forks_count']
class Gitlab(JsonREST):
"""Service supporting the Gitlab issue tracker."""
_service = 'gitlab'
_service_error_cls = GitlabError
item = GitlabIssue
item_endpoint = '/issues'
attachment = GitlabAttachment
#attachment_endpoint = '/file'
def __init__(self, base, max_results=None, **kw):
# extract gitlab domain
url = urlparse(base)
# TODO: generalize and allow versioned API support
api_base = urlunparse((
url.scheme,
url.netloc,
'/api/v4',
None, None, None))
paths = url.path.strip('/').split('/')
try:
group, project = paths
self.repo = f'{group}/{project}'
except ValueError:
group = paths[0] if paths[0] else None
self.repo = None
self.group = group
# gitlab maxes out at 100 results per page
if max_results is None:
max_results = 100
# use endpoint for namespaced API calls:
# https://docs.gitlab.com/ee/api/README.html#namespaced-path-encoding
endpoint = f"/projects/{quote_plus(self.repo)}" if self.repo is not None else ''
super().__init__(endpoint=endpoint, base=api_base, max_results=max_results, **kw)
self.webbase = base
def parse_response(self, response):
data = super().parse_response(response)
if 'error' not in data:
return data
else:
self.handle_error(code=response.status_code, msg=data['error'])
class GitlabPagedRequest(PagedRequest, LinkHeaderPagedRequest, RESTRequest):
"""Requests supporting gitlab's pagination method.
Docs: https://docs.gitlab.com/ee/api/README.html#pagination
"""
# Gitlab supports link headers as the canonical method for pagination, but
# it also provides parameters to request a given page so use those instead
# in order to easily generate async calls for future pages. Note that the
# total size of the query is still extracted from the headers though since
# that information isn't provided in the data response.
_page_key = 'page'
_size_key = 'per_page'
_total_key = 'NONE'
_total_header = 'X-Total'
# gitlab defaults to starting at page 1
_start_page = 1
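    # Illustrative query produced by this scheme (numbers are made up):
    # requesting the second page with the default page size would add
    # ?page=2&per_page=100 to the request URL.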
# TODO: Add more specific Elasticsearch functionality to another search req
# class, especially since gitlab.com doesn't support elasticsearch queries yet
# but newer self-hosted instances should.
@req_cmd(Gitlab, cmd='search')
class _SearchRequest(ParseRequest, GitlabPagedRequest):
"""Construct a search request.
Gitlab uses Elasticsearch on the backend so advanced queries use its syntax.
Docs: https://docs.gitlab.com/ee/user/search/advanced_search_syntax.html
"""
# map from standardized kwargs name to expected service parameter name
_params_map = {
'status': 'state',
}
def __init__(self, **kw):
if kw['service'].group is not None and kw['service'].repo is None:
self.endpoint = f"/groups/{kw['service'].group}/issues"
else:
self.endpoint = '/issues'
self._repo = kw['service'].repo
super().__init__(endpoint=self.endpoint, **kw)
def parse(self, data):
issues = super().parse(data)
for issue in issues:
yield self.service.item(repo=self._repo, **issue)
@aliased
class ParamParser(ParseRequest.ParamParser):
# map of allowed status input values to service parameters, aliases are
# capitalized
_status_map = {
'open': 'opened',
'closed': 'closed',
'ALL': 'ALL',
}
def _finalize(self, **kw):
if not self.params:
raise BiteError('no supported search terms or options specified')
# default to returning only open issues
self.params.setdefault('status', 'opened')
# status must be unset to search across all values
if self.params['status'] == 'ALL':
del self.params['status']
# don't restrict scope by default
self.params.setdefault('scope', 'all')
# show issues in ascending order by default
self.params.setdefault('sort', 'asc')
def terms(self, k, v):
self.params['search'] = v
self.options.append(f"Summary: {', '.join(v)}")
def id(self, k, v):
self.params['iids[]'] = v
self.options.append(f"IDs: {', '.join(map(str, v))}")
def labels(self, k, v):
self.params[k] = ','.join(v)
self.options.append(f"{k.capitalize()}: {', '.join(v)}")
def milestone(self, k, v):
self.params[k] = v
self.options.append(f"{k.capitalize()}: {v}")
def status(self, k, v):
value = self._status_map.get(v)
if value is None:
raise BiteError(
f"invalid status value: {v} "
f"(available: {', '.join(sorted(self._status_map))})")
self.params[k] = value
self.options.append(f"{k.capitalize()}: {v}")
def group(self, k, v):
self.request.kwargs['endpoint'] = f'/groups/{v}/issues'
self.options.append(f"{k.capitalize()}: {v}")
def repo(self, k, v):
if self.service.group is None:
if '/' not in v:
raise BiteError(f'repo missing group: {v!r}')
repo = v
else:
repo = f'{self.service.group}/{v}'
self.request.kwargs['endpoint'] = f"/projects/{quote_plus(repo)}/issues"
self.request._repo = repo
self.options.append(f"{k.capitalize()}: {v}")
def project(self, k, v):
if self.service.group is None:
raise BiteError(f'missing group')
repo = f'{self.service.group}/{v}'
self.request.kwargs['endpoint'] = f"/projects/{quote_plus(repo)}/issues"
self.request._repo = repo
self.options.append(f"{k.capitalize()}: {v}")
@alias('modified')
def created(self, k, v):
field = 'updated' if k == 'modified' else k
if not isinstance(v, TimeInterval):
v = TimeInterval(v)
start, end = v
if start:
self.params[f'{field}_after'] = start.isoformat()
if end:
self.params[f'{field}_before'] = end.isoformat()
self.options.append(f'{k.capitalize()}: {v}')
# TODO: move to using search API
@req_cmd(Gitlab, cmd='project_search')
class _ProjectSearchRequest(ParseRequest, GitlabPagedRequest):
"""Construct a project search request."""
def __init__(self, **kw):
if kw['service'].group is not None and kw['service'].repo is None:
self.endpoint = f"/groups/{kw['service'].group}/projects"
else:
self.endpoint = '/projects'
super().__init__(endpoint=self.endpoint, **kw)
def parse(self, data):
projects = list(super().parse(data))
for project in projects:
yield GitlabProject(**project)
@aliased
class ParamParser(ParseRequest.ParamParser):
def _finalize(self, **kw):
if not self.params:
raise BiteError('no supported search terms or options specified')
# show issues in ascending order by default
self.params.setdefault('sort', 'asc')
def terms(self, k, v):
self.params['search'] = v
self.options.append(f"Summary: {', '.join(v)}")
|
py | 1a4947baa5da179d13f4c92e1121d8597f6aa76b | # pylint: disable=wildcard-import, unused-wildcard-import
"""Model store which handles pretrained models from both
mxnet.gluon.model_zoo.vision and gluoncv.models
"""
from mxnet import gluon
from .ssd import *
from .faster_rcnn import *
from .fcn import *
from .pspnet import *
from .cifarresnet import *
from .cifarresnext import *
from .cifarwideresnet import *
from .resnetv1b import *
from .resnext import *
from .senet import *
from .se_resnet import *
from .yolo import *
from .nasnet import *
__all__ = ['get_model', 'get_model_list']
_models = {
'ssd_300_vgg16_atrous_voc': ssd_300_vgg16_atrous_voc,
'ssd_300_vgg16_atrous_coco': ssd_300_vgg16_atrous_coco,
'ssd_300_vgg16_atrous_custom' : ssd_300_vgg16_atrous_custom,
'ssd_512_vgg16_atrous_voc': ssd_512_vgg16_atrous_voc,
'ssd_512_vgg16_atrous_coco': ssd_512_vgg16_atrous_coco,
'ssd_512_vgg16_atrous_custom': ssd_512_vgg16_atrous_custom,
'ssd_512_resnet18_v1_voc': ssd_512_resnet18_v1_voc,
'ssd_512_resnet18_v1_coco': ssd_512_resnet18_v1_coco,
'ssd_512_resnet50_v1_voc': ssd_512_resnet50_v1_voc,
'ssd_512_resnet50_v1_coco': ssd_512_resnet50_v1_coco,
'ssd_512_resnet50_v1_custom': ssd_512_resnet50_v1_custom,
'ssd_512_resnet101_v2_voc': ssd_512_resnet101_v2_voc,
'ssd_512_resnet152_v2_voc': ssd_512_resnet152_v2_voc,
'ssd_512_mobilenet1_0_voc': ssd_512_mobilenet1_0_voc,
'ssd_512_mobilenet1_0_coco': ssd_512_mobilenet1_0_coco,
'ssd_512_mobilenet1_0_custom': ssd_512_mobilenet1_0_custom,
'faster_rcnn_resnet50_v1b_voc': faster_rcnn_resnet50_v1b_voc,
'faster_rcnn_resnet50_v1b_coco': faster_rcnn_resnet50_v1b_coco,
'cifar_resnet20_v1': cifar_resnet20_v1,
'cifar_resnet56_v1': cifar_resnet56_v1,
'cifar_resnet110_v1': cifar_resnet110_v1,
'cifar_resnet20_v2': cifar_resnet20_v2,
'cifar_resnet56_v2': cifar_resnet56_v2,
'cifar_resnet110_v2': cifar_resnet110_v2,
'cifar_wideresnet16_10': cifar_wideresnet16_10,
'cifar_wideresnet28_10': cifar_wideresnet28_10,
'cifar_wideresnet40_8': cifar_wideresnet40_8,
'cifar_resnext29_32x4d': cifar_resnext29_32x4d,
'cifar_resnext29_16x64d': cifar_resnext29_16x64d,
'fcn_resnet50_voc': get_fcn_voc_resnet50,
'fcn_resnet101_voc': get_fcn_voc_resnet101,
'fcn_resnet50_ade': get_fcn_ade_resnet50,
'psp_resnet50_ade': get_psp_ade_resnet50,
'resnet18_v1b': resnet18_v1b,
'resnet34_v1b': resnet34_v1b,
'resnet50_v1b': resnet50_v1b,
'resnet101_v1b': resnet101_v1b,
'resnet152_v1b': resnet152_v1b,
'resnet50_v1c': resnet50_v1c,
'resnet101_v1c': resnet101_v1c,
'resnet152_v1c': resnet152_v1c,
'resnet50_v1d': resnet50_v1d,
'resnet101_v1d': resnet101_v1d,
'resnet152_v1d': resnet152_v1d,
'resnext50_32x4d': resnext50_32x4d,
'resnext101_32x4d': resnext101_32x4d,
'resnext101_64x4d': resnext101_64x4d,
'se_resnext50_32x4d': se_resnext50_32x4d,
'se_resnext101_32x4d': se_resnext101_32x4d,
'se_resnext101_64x4d': se_resnext101_64x4d,
'senet_52': senet_52,
'senet_103': senet_103,
'senet_154': senet_154,
'se_resnet18_v1': se_resnet18_v1,
'se_resnet34_v1': se_resnet34_v1,
'se_resnet50_v1': se_resnet50_v1,
'se_resnet101_v1': se_resnet101_v1,
'se_resnet152_v1': se_resnet152_v1,
'se_resnet18_v2': se_resnet18_v2,
'se_resnet34_v2': se_resnet34_v2,
'se_resnet50_v2': se_resnet50_v2,
'se_resnet101_v2': se_resnet101_v2,
'se_resnet152_v2': se_resnet152_v2,
'darknet53': darknet53,
'yolo3_darknet53_coco': yolo3_darknet53_coco,
'yolo3_darknet53_voc': yolo3_darknet53_voc,
'yolo3_darknet53_custom': yolo3_darknet53_custom,
'nasnet_4_1056': nasnet_4_1056,
'nasnet_5_1538': nasnet_5_1538,
'nasnet_7_1920': nasnet_7_1920,
'nasnet_6_4032': nasnet_6_4032,
}
def get_model(name, **kwargs):
"""Returns a pre-defined model by name
Parameters
----------
name : str
Name of the model.
pretrained : bool
Whether to load the pretrained weights for model.
classes : int
Number of classes for the output layer.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
Returns
-------
HybridBlock
The model.
"""
try:
net = gluon.model_zoo.vision.get_model(name, **kwargs)
return net
except ValueError as e:
upstream_supported = str(e)
# avoid raising inside which cause a bit messy error message
name = name.lower()
if name not in _models:
raise ValueError('%s\n\t%s' % (upstream_supported, '\n\t'.join(sorted(_models.keys()))))
net = _models[name](**kwargs)
return net
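# Illustrative call, assuming the weights are available locally or upstream
# (the model name is just one key from the _models dict above):
#   net = get_model('yolo3_darknet53_coco', pretrained=False)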
def get_model_list():
"""Get the entire list of model names in model_zoo.
Returns
-------
list of str
Entire list of model names in model_zoo.
"""
return _models.keys()
|
py | 1a4947d52ad946e74a54059422dcf5bfaac8cc61 | __all__ = ['DynamicPricingConfig']
from enum import Enum, unique
from typing import List
from ..primitives.base import attribute, BaseTolokaObject
class DynamicPricingConfig(BaseTolokaObject, kw_only=False):
"""The dynamic pricing settings.
Attributes:
type: Parameter type for calculating dynamic pricing. The SKILL value.
skill_id: ID of the skill that the task price is based on
intervals: Skill level intervals. Must not overlap.
A performer with a skill level that is not included in any interval will receive the basic
price for a task suite.
"""
@unique
class Type(Enum):
"""Dynamic pricing type"""
SKILL = 'SKILL'
class Interval(BaseTolokaObject):
"""Skill level interval
Attributes:
from_: Lower bound of the interval.
            to: Upper bound of the interval.
reward_per_assignment: The price per task page for a performer with the specified skill level.
"""
from_: int = attribute(origin='from')
to: int
reward_per_assignment: float
type: Type
skill_id: str
intervals: List[Interval]
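# A minimal construction sketch (skill id and numbers are made up):
#   DynamicPricingConfig(
#       DynamicPricingConfig.Type.SKILL,
#       skill_id='12345',
#       intervals=[DynamicPricingConfig.Interval(
#           from_=10, to=100, reward_per_assignment=0.05)],
#   )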
|
py | 1a49484dc0a8edd591e2f131c911fd6151f4f283 | from functools import wraps
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.losses import (
BinaryCrossentropy,
CategoricalCrossentropy,
MeanAbsoluteError,
MeanSquaredError,
)
def distributed_sum_over_batch_size(batch_size: int):
def _distributed_sum_over_batch_size(function):
@wraps(function)
def wrapper(*args, **kwargs):
output_tensor = function(*args, **kwargs)
return tf.nn.compute_average_loss(
output_tensor, global_batch_size=batch_size
)
return wrapper
return _distributed_sum_over_batch_size
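# Sketch of how this decorator could be applied; the batch size and the
# per-example loss below are made-up examples:
#   @distributed_sum_over_batch_size(batch_size=64)
#   def per_example_l2(y_true, y_pred):
#       return tf.reduce_sum(tf.square(y_true - y_pred), axis=-1)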
def distributed_mean(function):
@wraps(function)
def wrapper(*args, **kwargs):
output_tensor = function(*args, **kwargs)
return tf.math.reduce_mean(output_tensor) / 2
return wrapper
@tf.function
def apply_softmax(logits):
return keras.activations.softmax(logits)
@distributed_mean
@tf.function
def compute_l1_loss(fake_outputs, ground_truth):
return MeanAbsoluteError(reduction=keras.losses.Reduction.NONE)(
ground_truth, fake_outputs
)
@distributed_mean
@tf.function
def compute_binary_crossentropy(y_true, y_predicted) -> float:
"""Compute Binary Categorical Crossentropy.
### Parameters:
y_true:
y_predicted:
### Returns:
the computed loss.
"""
return BinaryCrossentropy(
from_logits=False,
reduction=keras.losses.Reduction.NONE,
)(y_true, y_predicted)
@tf.function
def compute_categorical_crossentropy(logits, labels) -> float:
"""Compute Sparse Categorical Crossentropy.
### Parameters:
logits: the logits
labels: the labels
### Returns:
Computed loss.
"""
# return CategoricalCrossentropy()( # reduction=keras.losses.Reduction.NONE)(
# logits, labels
# )
return CategoricalCrossentropy(reduction=keras.losses.Reduction.NONE)(
labels, logits
)
@distributed_mean
@tf.function
def compute_euclidean_distance(fake_outputs, ground_truth) -> float:
return MeanSquaredError(reduction=keras.losses.Reduction.NONE)(
ground_truth, fake_outputs
)
@tf.function
def normalize(logits, axis: int = None, name: str = None):
normalized = tf.linalg.normalize(logits, ord="euclidean", axis=axis, name=name)[0]
return tf.squeeze(normalized)
|
py | 1a494929a501268b30d0769c452948eebd42baab | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Download and build the data if it does not exist.
import parlai.core.build_data as build_data
import gzip
import os
import re
from parlai.core.build_data import DownloadableFile
from parlai.utils.io import PathManager
RESOURCES = [
DownloadableFile(
'http://opus.lingfil.uu.se/download.php?f=OpenSubtitles/en.tar.gz',
'OpenSubtitles.tar.gz',
'aef6d57db36c221b8cff1cf2356309874c27ef6a36bb8ca714509b37d0db29bc',
)
]
def _regularize(sent):
sent = sent.replace('i>', '').replace('<', '').replace('>', '')
sent = re.sub(r'x[0-9|a-f][0-9|a-f]', ' ', sent)
sent = sent.replace('\\', '').replace('-', '')
sent = ' '.join(re.findall(r"[\w']+|[.,!?:;]", sent))
sent = sent.replace('. .', '...')
sent = ' '.join(sent.split())
return sent
def create_fb_format(inpath, outpath):
print('[building fbformat]')
with PathManager.open(
os.path.join(outpath, 'train.txt'), 'w'
) as ftrain, PathManager.open(
os.path.join(outpath, 'valid.txt'), 'w'
) as fvalid, PathManager.open(
os.path.join(outpath, 'test.txt'), 'w'
) as ftest:
conv_id = 0
# find all the files.
for root, _subfolder, files in os.walk(inpath):
for f in files:
if f.endswith('.gz'):
dialog = []
conv_id = conv_id + 1
with gzip.open(os.path.join(root, f), 'r') as f1:
words = []
line_id = 1
turn_id = 0
for line in f1:
line = str(line)
if line.find('<s id="') != -1:
# new sentence
if len(words) > 0:
curr_words = _regularize(''.join(words))
if len(curr_words) > 0:
if (turn_id % 2) == 0:
dialog.append(str(line_id))
dialog.append(' ')
dialog.append(curr_words)
else:
dialog.append('\t')
dialog.append(curr_words)
dialog.append('\n')
line_id += 1
turn_id += +1
words.clear()
else:
i1 = line.find('<w id="')
if i1 >= 0:
line = line[i1:]
word = line[line.find('>') + 1 : line.find('</w')]
words.append(' ')
words.append(word.replace('\t', ' '))
handle = ftrain
if (conv_id % 10) == 0:
handle = ftest
if (conv_id % 10) == 1:
handle = fvalid
dialog.append('\n')
handle.write(''.join(dialog))
def build(datapath):
dpath = os.path.join(datapath, 'OpenSubtitles')
version = '2'
if not build_data.built(dpath, version_string=version):
print('[building data: ' + dpath + ']')
if build_data.built(dpath):
# An older version exists, so remove these outdated files.
build_data.remove_dir(dpath)
build_data.make_dir(dpath)
# Download the data.
for downloadable_file in RESOURCES:
downloadable_file.download_file(dpath)
create_fb_format(os.path.join(dpath, 'OpenSubtitles', 'en'), dpath)
# Mark the data as built.
build_data.mark_done(dpath, version_string=version)
return dpath
|
py | 1a4949701436f51fde31cbe8f3fe5be76d7de155 | import itertools as it, operator as op, functools as ft
from xml.sax.saxutils import escape as xml_escape
import html.parser, html.entities
import os, re, collections as cs, urllib.request as ulr
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('Gdk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, GLib, Pango
from . import core
import logging
log = logging.getLogger(__name__)
class MarkupToText(html.parser.HTMLParser):
def handle_starttag(self, tag, attrs): pass
def handle_endtag(self, tag): pass
def handle_entityref(self, ref): self.d.append(f'&{ref};')
def handle_charref(self, ref): self.d.append(f'&#{ref};')
def handle_data(self, data): self.d.append(data)
def __call__(self, s):
self.d = list()
self.feed(s)
return ''.join(self.d).strip()
strip_markup = MarkupToText()
class NotificationDisplay:
'''Interface to display notification stack.
Should have "display(note, cb_dismiss=None) -> nid(UInt32, >0)", "close(nid)"
methods and NoWindowError(nid) exception, raised on erroneous nid's in close().
Current implementation based on notipy: git://github.com/the-isz/notipy.git'''
window = cs.namedtuple('Window', 'gobj event_boxes')
base_css = b'''
#notification { background: transparent; }
#notification #frame { background-color: #d4ded8; padding: 3px; }
#notification #hs { background-color: black; }
#notification #critical { background-color: #ffaeae; }
#notification #normal { background-color: #f0ffec; }
#notification #low { background-color: #bee3c6; }
#notification #summary {
color: black;
padding-left: 5px;
font-size: 1.2em;
text-shadow: 1px 1px 0px gray;
}
#notification #body { color: black; font-size: 1em; }
#notification #body * { color: black; background-color: #d4ded8; }
'''
    base_css_min = b'#notification * { font-size: 8; }' # simplest fallback
def __init__( self, layout_margin,
layout_anchor, layout_direction, icon_scale=dict(),
markup_default=False, markup_warn=False, markup_strip=False ):
self.margins = dict(it.chain.from_iterable(map(
lambda ax: ( (2**ax, layout_margin),
(-2**ax, layout_margin) ), range(2) )))
self.layout_anchor = layout_anchor
self.layout_direction = layout_direction
self.icon_scale = icon_scale
self.markup_default = markup_default
self.markup_warn, self.markup_strip = markup_warn, markup_strip
self._windows = dict()
self._default_style = self._get_default_css()
screen = Gdk.Screen.get_default()
if not screen: raise core.StartupFailure('No X screen detected')
Gtk.StyleContext.add_provider_for_screen(
screen, self._default_style,
Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION )
def _pango_markup_parse(self, text, _err_mark='[TN82u8] '):
try:
success, _, text, _ = Pango.parse_markup(text, -1, '\0')
if not success: raise GLib.GError('pango_parse_markup failure')
except GLib.GError as err:
success = False # should be rendered as text
if self.markup_warn:
msg_start = f'{_err_mark}Pango formatting failed'
if msg_start not in text: # detect and avoid possible feedback loops
log.warn('%s (%s) for text, stripping markup: %r', msg_start, err, text)
if self.markup_strip: # strip + re-parse to convert xml entities and such
text = strip_markup(text)
try: _, _, text, _ = Pango.parse_markup(text, -1, '\0')
except GLib.GError: pass
return success, text
def _get_default_css(self):
css, base_css = Gtk.CssProvider(), self.base_css
for attempt in range(6):
try: css.load_from_data(base_css)
except GLib.GError as err:
log.warn('Failed to load default CSS style (try %s): %s', attempt+1, err)
# print(base_css)
else: break
# Try to work around https://bugzilla.gnome.org/show_bug.cgi?id=678876 and similar issues
if attempt == 0:
base_css = re.sub(br'\b(background-color:)\s*rgba\([^;]+;', br'\1 white;', base_css)
elif attempt == 1:
base_css = re.sub(br'\b(font-size:)\s*(\d+)px\s*;', br'\1 \2;', base_css)
elif attempt == 2:
base_css = re.sub(br'\b(text-shadow:)[^;]+;', br'\1 1 1 0 gray;', base_css)
elif attempt == 3: base_css = re.sub(br'\btext-shadow:[^;]+;', b'', base_css)
elif attempt == 4: base_css = self.base_css_min # last resort before no-css-at-all
else: break # don't load any css
return css
def _update_layout(self):
# Get the coordinates of the "anchor" corner (screen corner +/- margins)
base = tuple(map(
lambda ax, gdk_dim=('width', 'height'):\
(getattr(Gdk.Screen, gdk_dim[ax])() - self.margins[2**ax])\
if 2**ax & self.layout_anchor else self.margins[-2**ax], range(2) ))
# Iterate over windows in order, placing each one starting from a "base" corner
for win in map(op.attrgetter('gobj'), self._windows.values()):
win.move(*map(lambda ax: base[ax] - ( win.get_size()[ax]
if 2**ax & self.layout_anchor else 0 ), range(2)))
margin = self.margins[(2 * ( (2**self.layout_direction)
& self.layout_anchor ) / 2**self.layout_direction - 1) * 2**self.layout_direction]
base = tuple(map(
lambda ax: base[ax] if self.layout_direction != ax else\
base[ax] + (margin + win.get_size()[ax])\
* (2 * (2**ax ^ (2**ax & self.layout_anchor)) / 2**ax - 1), range(2) ))
def _get_icon(self, icon, remote=False):
widget_icon = None
if icon is not None:
if isinstance(icon, str):
icon_path = os.path.expanduser(ulr.url2pathname(icon))
if icon_path.startswith('file://'): icon_path = icon_path[7:]
if os.path.isfile(icon_path):
widget_icon = GdkPixbuf.Pixbuf.new_from_file(icon_path)
else:
# Available names: Gtk.IconTheme.get_default().list_icons(None)
theme = Gtk.IconTheme.get_default()
icon_size = any(self.icon_scale.get('fixed', list())) or 32
widget_icon = theme.lookup_icon(
icon, icon_size, Gtk.IconLookupFlags.USE_BUILTIN )
if widget_icon: widget_icon = widget_icon.load_icon()
else:
                        # Msgs from remote hosts naturally can have non-local icon paths in them
(log.warn if not remote else log.debug)(
                            'Provided icon info seems to be neither a valid icon file nor'
' a name in a freedesktop.org-compliant icon theme (or current theme'
' does not have that one), ignoring it: %r', core.format_trunc(icon) )
else:
w, h, rowstride, has_alpha, bits_per_sample, channels, data = icon
data = bytes(bytearray(data))
widget_icon = GdkPixbuf.Pixbuf.new_from_data(
data, GdkPixbuf.Colorspace.RGB, bool(has_alpha),
int(bits_per_sample), int(w), int(h), int(rowstride) )
widget_icon._data = data # must be preserved from gc
if widget_icon:
if any(it.chain.from_iterable(self.icon_scale.values())): # scale icon
w, h = widget_icon.get_width(), widget_icon.get_height()
for k in 'fixed', 'min', 'max':
box_w, box_h = self.icon_scale.get(k, (0, 0))
if not any([box_w, box_h]): continue
if k == 'min' and not ((box_w and w < box_w) or (box_h and h < box_h)): continue
if k == 'max' and not ((box_w and w > box_w) or (box_h and h > box_h)): continue
scale_down = (box_w and w > box_w) or (box_h and h > box_h)
if scale_down: scale = min # factor<1, unspec=1, must fit on both dimensions
elif box_w and box_h: scale = min # factor>1, but still pick min to fit on both
else: scale = max # ignore unspec=1 and scale to max possible factor
scale = scale(float(box_w or w) / w, float(box_h or h) / h)
box_w, box_h = w * scale, h * scale
log.debug( 'Scaling image (%s, criteria: %s) by a factor of'
' %.3f: %dx%d -> %dx%d', ['up', 'down'][scale_down], k, scale, w, h, box_w, box_h )
widget_icon = widget_icon.scale_simple(box_w, box_h, GdkPixbuf.InterpType.BILINEAR)
if k == 'fixed': break # no need to apply min/max after that
widget_icon, pixbuf = Gtk.Image(), widget_icon
widget_icon.set_from_pixbuf(pixbuf)
return widget_icon
def _set_visual(self, win, ev=None):
visual = win.get_screen().get_rgba_visual()
if visual: win.set_visual(visual)
def _create_win( self, summary, body,
icon=None, urgency_label=None, markup=False, remote=None ):
log.debug( 'Creating window with parameters: %s',
core.repr_trunc_rec(dict( summary=summary, body=body,
icon=icon, urgency=urgency_label, markup=markup )) )
win = Gtk.Window(name='notification', type=Gtk.WindowType.POPUP)
win.set_default_size(400, 20)
win.connect('screen-changed', self._set_visual)
self._set_visual(win)
ev_boxes = [win]
frame = Gtk.Box(name='frame')
win.add(frame)
try: widget_icon = self._get_icon(icon, remote=remote)
except Exception: # Gdk may raise errors for some images/formats
log.exception('Failed to set notification icon')
widget_icon = None
box_margin = 3
v_box = Gtk.VBox(spacing=box_margin, expand=False)
if widget_icon is not None:
h_box = Gtk.HBox(spacing=box_margin * 2)
frame.pack_start(h_box, True, True, 0)
h_box.pack_start(widget_icon, False, False, 0)
h_box.pack_start(v_box, True, True, 0)
ev_boxes.append(h_box)
else: frame.pack_start(v_box, True, True, 0)
widget_summary = Gtk.Label(name='summary')
# Sanitize tags through pango first, so set_markup won't produce empty label
markup_summary = markup
if markup_summary:
markup_summary, text = self._pango_markup_parse(summary)
if markup_summary: widget_summary.set_markup(summary)
else: summary = text
if not markup_summary: widget_summary.set_text(summary)
widget_summary.set_alignment(0, 0)
if urgency_label:
summary_box = Gtk.EventBox(name=urgency_label)
summary_box.add(widget_summary)
else: summary_box = widget_summary
v_box.pack_start(summary_box, False, False, 0)
ev_boxes.append(summary_box)
v_box.pack_start(Gtk.HSeparator(name='hs'), False, False, 0)
widget_body = Gtk.TextView( name='body',
wrap_mode=Gtk.WrapMode.WORD_CHAR,
cursor_visible=False, editable=False )
widget_body_buffer = widget_body.get_buffer()
# Same as with summary - sanitize tags through pango first
markup_body = markup
if markup_body:
markup_body, text = self._pango_markup_parse(body)
if markup_body:
cursor = widget_body_buffer.get_end_iter()
widget_body_buffer.insert_markup(cursor, body, -1)
else: body = text
if not markup_body: widget_body_buffer.set_text(body)
v_box.pack_start(widget_body, True, True, 0)
ev_boxes.append(widget_body)
# Make sure the window is initially drawn off-screen, because it can't be
        # placed properly until its size is known, and its size is unknown until it's
# actually handled by window manager and then drawn by X
# Proper placement is done on update_layout() call
win.move(-2000, -2000)
win.show_all()
return self.window(win, ev_boxes)
def get_note_markup(self, note):
return note.hints.get('x-nt-markup', self.markup_default)
def get_note_text(self, note):
'Returns note text, stripped of all markup, if any (and if enabled).'
markup, summary, body = self.get_note_markup(note), note.summary, note.body
if markup:
_, summary = self._pango_markup_parse(summary)
_, body = self._pango_markup_parse(body)
return summary, body
def display(self, note, cb_dismiss=None, cb_hover=None, cb_leave=None):
try:
# Priorities for icon sources:
# image{-,_}data: hint. raw image data structure of signature (iiibiiay)
# image{-,_}path: hint. either an URI (file://...) or a name in a f.o-compliant icon theme
# app_icon: parameter. same as image-path
# icon_data: hint. same as image-data
            # image_* are deprecated hints from the 1.1 spec, 1.2 is preferred
# (don't seem to be even mentioned in 1.2 spec icon priorities section)
hints = note.hints.copy()
k = '__app_icon' # to avoid clobbering anything
hints[k] = note.icon
for k in 'image-data', 'image_data',\
'image-path', 'image_path', k, 'icon_data':
image = hints.get(k)
if image:
log.debug('Got icon image from hint: %s', k)
break
urgency = note.hints.get('urgency')
if urgency is not None: urgency = core.urgency_levels.by_id(int(urgency))
markup = self.get_note_markup(note)
win = self._create_win( note.summary, note.body,
image, urgency, markup=markup, remote=note.hints.get('x-nt-from-remote') )
for eb in win.event_boxes:
eb.add_events(
Gdk.EventMask.BUTTON_PRESS_MASK
| Gdk.EventMask.POINTER_MOTION_MASK
| Gdk.EventMask.LEAVE_NOTIFY_MASK )
for ev,cb in [
('button-press-event', cb_dismiss),
('motion-notify-event', cb_hover),
('leave-notify-event', cb_leave) ]:
if cb: eb.connect(ev, lambda w,ev,cb,nid: cb(nid), cb, note.id)
if cb_dismiss and win.event_boxes:
# Connect only to window object (or first eventbox in the list)
win.event_boxes[0].connect( 'destroy',
lambda w,cb,nid: cb(nid), cb_dismiss, note.id )
# update_layout() *must* be delayed until window "configure-event", because
# actual window size is unknown until it's resized by window manager and drawn by X
# See the list of caveats here:
# http://developer.gnome.org/gtk3/unstable/GtkWindow.html#gtk-window-get-size
win.gobj.connect('configure-event', lambda w,void: self._update_layout())
self._windows[note.id] = win
except: log.exception('Failed to create notification window')
class NoWindowError(Exception): pass
def _close(self, nid):
try: win = self._windows.pop(nid).gobj
except KeyError: raise self.NoWindowError(nid)
win.hide(), win.destroy()
def close(self, nid):
self._close(nid)
self._update_layout()
|
py | 1a4949d0e927efb5d5dbf61c6a7357840ac8ce59 | # =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2021 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Authors: Radu Serban
# =============================================================================
#
# Main driver function for the FED-alpha full model.
#
# The vehicle reference frame has Z up, X towards the front of the vehicle, and
# Y pointing to the left.
#
# =============================================================================
import pychrono as chrono
import pychrono.vehicle as veh
import pychrono.irrlicht as irr
import math as m
# =============================================================================
def main():
#print("Copyright (c) 2017 projectchrono.org\nChrono version: ", CHRONO_VERSION , "\n\n")
# Create systems
# Create the FEDA vehicle, set parameters, and initialize
my_feda = veh.FEDA()
my_feda.SetContactMethod(contact_method)
my_feda.SetChassisCollisionType(chassis_collision_type)
my_feda.SetChassisFixed(False)
my_feda.SetInitPosition(chrono.ChCoordsysD(initLoc, initRot))
my_feda.SetPowertrainType(powertrain_model)
my_feda.SetTireType(tire_model)
my_feda.SetTireStepSize(tire_step_size)
my_feda.Initialize()
my_feda.SetChassisVisualizationType(chassis_vis_type)
my_feda.SetSuspensionVisualizationType(suspension_vis_type)
my_feda.SetSteeringVisualizationType(steering_vis_type)
my_feda.SetWheelVisualizationType(wheel_vis_type)
my_feda.SetTireVisualizationType(tire_vis_type)
# Create the terrain
terrain = veh.RigidTerrain(my_feda.GetSystem())
if (contact_method == chrono.ChContactMethod_NSC):
patch_mat = chrono.ChMaterialSurfaceNSC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
elif (contact_method == chrono.ChContactMethod_SMC):
patch_mat = chrono.ChMaterialSurfaceSMC()
patch_mat.SetFriction(0.9)
patch_mat.SetRestitution(0.01)
patch_mat.SetYoungModulus(2e7)
patch = terrain.AddPatch(patch_mat,
chrono.ChVectorD(0, 0, 0), chrono.ChVectorD(0, 0, 1),
terrainLength, terrainWidth)
patch.SetTexture(veh.GetDataFile("terrain/textures/tile4.jpg"), 200, 200)
patch.SetColor(chrono.ChColor(0.8, 0.8, 0.5))
terrain.Initialize()
# Create the vehicle Irrlicht interface
vis = veh.ChWheeledVehicleVisualSystemIrrlicht()
my_feda.GetVehicle().SetVisualSystem(vis)
vis.SetWindowTitle('FED-Alpha')
vis.SetWindowSize(1280, 1024)
vis.SetChaseCamera(trackPoint, 6.0, 0.5)
vis.Initialize()
vis.AddLogo(chrono.GetChronoDataFile('logo_pychrono_alpha.png'))
vis.AddTypicalLights()
vis.AddSkyBox()
# Create the interactive driver system
driver = veh.ChIrrGuiDriver(vis)
# Set the time response for steering and throttle keyboard inputs.
steering_time = 1.0 # time to go from 0 to +1 (or from 0 to -1)
throttle_time = 1.0 # time to go from 0 to +1
braking_time = 0.3 # time to go from 0 to +1
driver.SetSteeringDelta(10 * step_size / steering_time)
driver.SetThrottleDelta(10 * step_size / throttle_time)
driver.SetBrakingDelta(10 * step_size / braking_time)
driver.Initialize()
# Simulation loop
realtime_timer = chrono.ChRealtimeStepTimer()
while vis.Run() :
time = my_feda.GetSystem().GetChTime()
vis.BeginScene()
vis.DrawAll()
vis.EndScene()
# Get driver inputs
driver_inputs = driver.GetInputs()
# Update modules (process inputs from other modules)
driver.Synchronize(time)
terrain.Synchronize(time)
my_feda.Synchronize(time, driver_inputs, terrain)
vis.Synchronize(driver.GetInputModeAsString(), driver_inputs)
# Advance simulation for one timestep for all modules
driver.Advance(step_size)
terrain.Advance(step_size)
my_feda.Advance(step_size)
vis.Advance(step_size)
# Spin in place for real time to catch up
realtime_timer.Spin(step_size)
return 0
# The path to the Chrono data directory containing various assets (meshes, textures, data files)
# is automatically set, relative to the default location of this demo.
# If running from a different directory, you must change the path to the data directory with:
#chrono.SetChronoDataPath('path/to/data')
veh.SetDataPath(chrono.GetChronoDataPath() + 'vehicle/')
# Initial vehicle location and orientation
initLoc = chrono.ChVectorD(0, 0, 0.5)
initRot = chrono.ChQuaternionD(1, 0, 0, 0)
# Visualization type for vehicle parts (PRIMITIVES, MESH, or NONE)
chassis_vis_type = veh.VisualizationType_MESH
suspension_vis_type = veh.VisualizationType_PRIMITIVES
steering_vis_type = veh.VisualizationType_PRIMITIVES
wheel_vis_type = veh.VisualizationType_MESH
tire_vis_type = veh.VisualizationType_MESH
# Collision type for chassis (PRIMITIVES, MESH, or NONE)
chassis_collision_type = veh.CollisionType_NONE
# Type of powertrain model (SHAFTS, SIMPLE_MAP)
powertrain_model = veh.PowertrainModelType_SIMPLE_MAP
# Type of tire model (RIGID, PAC02)
tire_model = veh.TireModelType_PAC02
# Rigid terrain
terrainHeight = 0; # terrain height (FLAT terrain only)
terrainLength = 100.0; # size in X direction
terrainWidth = 100.0; # size in Y direction
# Point on chassis tracked by the camera
trackPoint = chrono.ChVectorD(0.0, 0.0, 1.75)
# Contact method
contact_method = chrono.ChContactMethod_SMC
# Simulation step sizes
step_size = 1e-3;
tire_step_size = 1e-3;
main()
|
py | 1a4949da7fa036474bda60b3eb26924778257458 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
__all__ = ['ControllerDetails']
class ControllerDetails(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
Represents an instance of a DNC controller.
API Version: 2020-08-08-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] location: Location of the resource.
:param pulumi.Input[str] resource_group_name: The name of the Azure Resource group of which a given DelegatedNetwork resource is part. This name must be at least 1 character in length, and no more than 90.
:param pulumi.Input[str] resource_name_: The name of the resource. It must be a minimum of 3 characters, and a maximum of 63.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: The resource tags.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['location'] = location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
__props__['resource_name'] = resource_name_
__props__['tags'] = tags
__props__['dnc_app_id'] = None
__props__['dnc_endpoint'] = None
__props__['dnc_tenant_id'] = None
__props__['name'] = None
__props__['provisioning_state'] = None
__props__['resource_guid'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:delegatednetwork:ControllerDetails"), pulumi.Alias(type_="azure-native:delegatednetwork/v20200808preview:ControllerDetails"), pulumi.Alias(type_="azure-nextgen:delegatednetwork/v20200808preview:ControllerDetails")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ControllerDetails, __self__).__init__(
'azure-native:delegatednetwork:ControllerDetails',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ControllerDetails':
"""
Get an existing ControllerDetails resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["dnc_app_id"] = None
__props__["dnc_endpoint"] = None
__props__["dnc_tenant_id"] = None
__props__["location"] = None
__props__["name"] = None
__props__["provisioning_state"] = None
__props__["resource_guid"] = None
__props__["tags"] = None
__props__["type"] = None
return ControllerDetails(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="dncAppId")
def dnc_app_id(self) -> pulumi.Output[str]:
"""
dnc application id should be used by customer to authenticate with dnc gateway.
"""
return pulumi.get(self, "dnc_app_id")
@property
@pulumi.getter(name="dncEndpoint")
def dnc_endpoint(self) -> pulumi.Output[str]:
"""
dnc endpoint url that customers can use to connect to
"""
return pulumi.get(self, "dnc_endpoint")
@property
@pulumi.getter(name="dncTenantId")
def dnc_tenant_id(self) -> pulumi.Output[str]:
"""
tenant id of dnc application id
"""
return pulumi.get(self, "dnc_tenant_id")
@property
@pulumi.getter
def location(self) -> pulumi.Output[Optional[str]]:
"""
Location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[str]:
"""
The current state of dnc controller resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="resourceGuid")
def resource_guid(self) -> pulumi.Output[str]:
"""
Resource guid.
"""
return pulumi.get(self, "resource_guid")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of resource.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
py | 1a494a3a842a103512814693c4d542c84af5f081 | from django import forms
from .models import Comment, Review
class ReviewForm(forms.ModelForm):
class Meta:
model = Review
fields = ('rating', 'experience', 'description')
def clean_rating(self):
data = self.cleaned_data['rating']
        if not (1 <= data <= 5):
raise forms.ValidationError("Please choose a rating")
return data
class CommentForm(forms.ModelForm):
class Meta:
model = Comment
fields = ('body',)
|
py | 1a494aa2da01db2ac12ed21a1ce6643f45aa5776 | #!/usr/bin/env python3
# Copyright (c) 2015-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction RPC."""
from test_framework.test_framework import TitcoinTestFramework
from test_framework.util import *
class SignRawTransactionsTest(TitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def successful_signing_test(self):
"""Create and sign a valid raw transaction with one input.
Expected results:
1) The transaction has a complete set of signatures
2) No script verification error occurred"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N', 'cVKpPfVKSJxKqVpE9awvXNWuLHCa5j5tiE7K6zbUSptFpTEtiFrA']
inputs = [
# Valid pay-to-pubkey scripts
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
{'txid': '83a4f6a6b73660e13ee6cb3c6063fa3759c50c9b7521d0536022961898f4fb02', 'vout': 0,
'scriptPubKey': '76a914669b857c03a5ed269d5d85a1ffac9ed5d663072788ac'},
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, inputs, privKeys)
# 1) The transaction has a complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], True)
# 2) No script verification error occurred
assert 'errors' not in rawTxSigned
def script_verification_error_test(self):
"""Create and sign a raw transaction with valid (vin 0), invalid (vin 1) and one missing (vin 2) input script.
Expected results:
3) The transaction has no complete set of signatures
4) Two script verification errors occurred
5) Script verification errors have certain properties ("txid", "vout", "scriptSig", "sequence", "error")
6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)"""
privKeys = ['cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N']
inputs = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7},
# Missing scriptPubKey
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 1},
]
scripts = [
# Valid pay-to-pubkey script
{'txid': '9b907ef1e3c26fc71fe4a4b3580bc75264112f95050014157059c736f0202e71', 'vout': 0,
'scriptPubKey': '76a91460baa0f494b38ce3c940dea67f3804dc52d1fb9488ac'},
# Invalid script
{'txid': '5b8673686910442c644b1f4993d8f7753c7c8fcb5c87ee40d56eaeef25204547', 'vout': 7,
'scriptPubKey': 'badbadbadbad'}
]
outputs = {'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB': 0.1}
rawTx = self.nodes[0].createrawtransaction(inputs, outputs)
# Make sure decoderawtransaction is at least marginally sane
decodedRawTx = self.nodes[0].decoderawtransaction(rawTx)
for i, inp in enumerate(inputs):
assert_equal(decodedRawTx["vin"][i]["txid"], inp["txid"])
assert_equal(decodedRawTx["vin"][i]["vout"], inp["vout"])
# Make sure decoderawtransaction throws if there is extra data
assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, rawTx + "00")
rawTxSigned = self.nodes[0].signrawtransaction(rawTx, scripts, privKeys)
# 3) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 4) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 5) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# 6) The verification errors refer to the invalid (vin 1) and missing input (vin 2)
assert_equal(rawTxSigned['errors'][0]['txid'], inputs[1]['txid'])
assert_equal(rawTxSigned['errors'][0]['vout'], inputs[1]['vout'])
assert_equal(rawTxSigned['errors'][1]['txid'], inputs[2]['txid'])
assert_equal(rawTxSigned['errors'][1]['vout'], inputs[2]['vout'])
assert not rawTxSigned['errors'][0]['witness']
# Now test signing failure for transaction with input witnesses
p2wpkh_raw_tx = "01000000000102fff7f7881a8099afa6940d42d1e7f6362bec38171ea3edf433541db4e4ad969f00000000494830450221008b9d1dc26ba6a9cb62127b02742fa9d754cd3bebf337f7a55d114c8e5cdd30be022040529b194ba3f9281a99f2b1c0a19c0489bc22ede944ccf4ecbab4cc618ef3ed01eeffffffef51e1b804cc89d182d279655c3aa89e815b1b309fe287d9b2b55d57b90ec68a0100000000ffffffff02202cb206000000001976a9148280b37df378db99f66f85c95a783a76ac7a6d5988ac9093510d000000001976a9143bde42dbee7e4dbe6a21b2d50ce2f0167faa815988ac000247304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee0121025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee635711000000"
rawTxSigned = self.nodes[0].signrawtransaction(p2wpkh_raw_tx)
# 7) The transaction has no complete set of signatures
assert 'complete' in rawTxSigned
assert_equal(rawTxSigned['complete'], False)
# 8) Two script verification errors occurred
assert 'errors' in rawTxSigned
assert_equal(len(rawTxSigned['errors']), 2)
# 9) Script verification errors have certain properties
assert 'txid' in rawTxSigned['errors'][0]
assert 'vout' in rawTxSigned['errors'][0]
assert 'witness' in rawTxSigned['errors'][0]
assert 'scriptSig' in rawTxSigned['errors'][0]
assert 'sequence' in rawTxSigned['errors'][0]
assert 'error' in rawTxSigned['errors'][0]
# Non-empty witness checked here
assert_equal(rawTxSigned['errors'][1]['witness'], ["304402203609e17b84f6a7d30c80bfa610b5b4542f32a8a0d5447a12fb1366d7f01cc44a0220573a954c4518331561406f90300e8f3358f51928d43c212a8caed02de67eebee01", "025476c2e83188368da1ff3e292e7acafcdb3566bb0ad253f62fc70f07aeee6357"])
assert not rawTxSigned['errors'][0]['witness']
def run_test(self):
self.successful_signing_test()
self.script_verification_error_test()
if __name__ == '__main__':
SignRawTransactionsTest().main()
|
py | 1a494b424d2a40955dbbfb96bd9597051f8ec117 | # Read file, create an array of its values after removing the \n from each line
with open('input') as file:
values = file.readlines()
values = [int(value[:len(value)-1]) for value in values]
# PART 1:
# Create a hashmap of every value's complements to reach 2020.
# When you encounter that complement in the future, multiply the two together and print them.
complements = dict()
for i, value in enumerate(values):
try:
print(f'{values[complements[value]]} * {value} = {value * values[complements[value]]}')
except KeyError:
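        # Not seen yet: remember the value that would pair with this one to sum to 2020.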
complements[2020 - value] = i
# In PART 2 I'm going to use the filter() and combinations() functions
from itertools import combinations
import math
def check(val):
return sum(val) == 2020
combinations = list(filter(check, list(combinations(values, 3))))
for combination in combinations:
print(f'{combination[0]} * {combination[1]} * {combination[2]} = {math.prod(combination)}')
|
py | 1a494e3da23528c7cd1e04ed565d5203f889ea5b | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
# from keras.layers.merge import Concatenate, Add, Dot, Multiply
import glob
import os
import zipfile
import keras
import numpy as np
import tensorflow as tf
from PIL import Image
from keras import backend as K
from sklearn.metrics import classification_report, confusion_matrix
from tensorflow.python.lib.io import file_io
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import tag_constants, signature_constants
from tensorflow.python.saved_model.signature_def_utils_impl import predict_signature_def
LABEL_SET = ['left', 'right', 'up', 'down', 'center', 'double_blink']
def normalize_image(img):
# return (img - 127.5) / 127.5
return (img.astype(np.float32) - 127.5) / 127.5
def denormalize_image(img):
result = img * 127.5 + 127.5
return result.astype(np.uint8)
NORMALIZE = False
def to_savedmodel(model, export_path):
"""Convert the Keras HDF5 model into TensorFlow SavedModel."""
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'input': model.inputs[0]},
outputs={'income': model.outputs[0]})
with K.get_session() as sess:
builder.add_meta_graph_and_variables(
sess=sess,
tags=[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
)
builder.save()
def session_to_savedmodel(session, inputs, outputs, export_path):
"""Convert the Keras HDF5 model into TensorFlow SavedModel."""
builder = saved_model_builder.SavedModelBuilder(export_path)
signature = predict_signature_def(inputs={'inputs': inputs},
outputs={'outputs': outputs})
builder.add_meta_graph_and_variables(
sess=session,
tags=[tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature}
)
builder.save()
def session_from_savedmodel(session, export_dir):
tf.saved_model.loader.load(session, [tag_constants.SERVING], export_dir)
def compare_url(a, b):
ia = int(a.split('/')[-1].replace('img_', '').split('.')[0])
prefix_a = '/'.join(a.split('/')[:-1])
ib = int(b.split('/')[-1].replace('img_', '').split('.')[0])
prefix_b = '/'.join(b.split('/')[:-1])
if prefix_a == prefix_b:
return ia - ib
elif prefix_a > prefix_b:
return 1
else:
        return -1
# key compare urls
OFFSET_BATCH = 1000000
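# key_compare_url builds a single sort key: batch number dominates, frame index breaks ties within a batch.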
def key_compare_url(a):
ia = int(a.split('/')[-1].replace('img_', '').split('.')[0])
batch_num = int(a.split('/')[-2].replace('batch_', ''))
# prefix_a = '/'.join(a.split('/')[:-1])
return batch_num * OFFSET_BATCH + ia
def load_npz(url):
files = np.load(url)
return files['X'], files['y']
# big problem with sorting data!!!!!!
def load_dataset(base_urls, label_set, sequence_length=15, get_zip=True):
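    # Builds the (X, y) arrays: consecutive frames are grouped into clips of
    # `sequence_length`, and each clip gets a one-hot label taken from its
    # position in `label_set`. Frames come from plain folders or zip archives.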
globs = {}
print(base_urls)
zips = {}
zip_dirs = {}
if os.path.isdir(base_urls[0]):
get_zip = False
if not get_zip:
for l in label_set:
globs[l] = []
for d in base_urls:
# print ('Open folder label {} in {}'.format(l, d))
path = os.path.join(d, l)
# print (path)
globs[l] += glob.glob('{dir}/*/*.jpg'.format(dir=path))
globs[l].sort(compare_url)
else:
for d in base_urls:
zips[d] = zipfile.ZipFile(retrieve_file(d), 'r')
# zips[d] = GzipFile(d, 'r+b')
zip_dirs[d] = {}
z_namelist = [n for n in zips[d].namelist() if n.split(".")[-1].lower() == 'jpg']
for l in label_set:
zip_dirs[d][l] = [n for n in z_namelist if l in n]
# zip_dirs[d][l].sort(compare_url)
zip_dirs[d][l].sort(key=key_compare_url)
# for u in zip_dirs[d][l]:
# print(u)
# datasets
X = []
y = []
y_raws = []
eye = np.eye(len(label_set))
for i, l in enumerate(label_set):
print('Label: {}'.format(l))
if get_zip:
for d in base_urls:
data = []
print('---Read Zip file: {}'.format(d))
for j, img_url in enumerate(zip_dirs[d][l]):
with Image.open(zips[d].open(img_url, 'r')) as img:
# img = Image.open(zips[d].open(img_url, 'r'))
if NORMALIZE:
img_array = normalize_image(np.array(img))
else:
img_array = np.array(img)
if j % sequence_length == 0 and j != 0:
# package into sequence
X.append(np.array(data))
y.append(np.array(eye[i]))
y_raws.append(l)
data = []
# else:
data.append(img_array)
else:
data = []
for j, img_url in enumerate(globs[l]):
# if j >= 61:
# break
with Image.open(img_url) as img:
# img = Image.open(img_url)
if NORMALIZE:
img_array = normalize_image(np.array(img))
else:
img_array = np.array(img)
# img_array = normalize_image(np.array(img))
if j % sequence_length == 0 and j != 0:
# package into sequence
X.append(np.array(data))
y.append(np.array(eye[i]))
y_raws.append(l)
data = []
# else:
data.append(img_array)
if get_zip:
for d in base_urls:
zips[d].close()
X = np.array(X)
y = np.array(y)
print(X.shape)
print(y.shape)
return X, y, y_raws, label_set
# h5py workaround: copy local models over to GCS if the job_dir is GCS.
def copy_file_to_gcs(job_dir, file_path):
with file_io.FileIO(file_path, mode='r') as input_f:
with file_io.FileIO(os.path.join(job_dir, file_path), mode='w+') as output_f:
output_f.write(input_f.read())
def write_file(job_dir, file_path):
if "gs://" in file_path:
print ('Write file to: {}/{}'.format(job_dir, file_path))
# with as f:
return copy_file_to_gcs(job_dir, file_path)
else:
return open(file_path, 'r')
# read file handle opening of gsc
def retrieve_file(file_path):
if "gs://" in file_path:
        print ('read data from gcs: {}'.format(file_path))
# with as f:
return file_io.FileIO(file_path, 'r')
else:
return open(file_path, 'r+b')
def after_train(model, model_name, job_dir, print_fn=print):
# def after_train(model, model_file, model_dir, train_config, label_set, model_name='cnn_', print_fn=print):
# Unhappy hack to work around h5py not being able to write to GCS.
# Force snapshots and saves to local filesystem, then copy them over to GCS.
if job_dir.startswith("gs://"):
model.save(model_name)
copy_file_to_gcs(job_dir, model_name)
else:
model.save(os.path.join(job_dir, model_name))
# Convert the Keras model to TensorFlow SavedModel
print_fn('Save model to {}'.format(job_dir))
to_savedmodel(model, os.path.join(job_dir, 'export'))
def report(true_val, pred_val, label_set, epoch=0, print_fn=print, digits=4, **kwargs):
report = classification_report(true_val, pred_val, target_names=label_set, digits=digits)
matrix = confusion_matrix(true_val, pred_val)
print_fn("----- Epoch:{} -----".format(epoch))
if 'loss' in kwargs:
print_fn('--Loss: {}'.format(kwargs['loss']))
print_fn(report)
print_fn(matrix)
class EvalCheckPoint(keras.callbacks.Callback):
def __init__(self, ml_model,
job_dir,
X, y,
label_set,
sequence_lenth,
eval_freq=4,
print_func=print,
epochs=10,
batch_norm=False
):
self.job_dir = job_dir
self.label_set = label_set
self.sequence_length = sequence_lenth
self.X_test = X
self.y_test = y
self.batch_norm = batch_norm
self.epochs = epochs
self.eval_freq = eval_freq
self.model = None
self.print_func = print_func
self.set_model(ml_model)
self.true_val = None
self.pred_val = None
self.true_val = np.array(np.argmax(self.y_test, axis=1))
def on_epoch_begin(self, epoch, logs={}):
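        # Every eval_freq epochs (and on the last one), predict on the held-out set and print a classification report.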
if epoch > 0 and (epoch % self.eval_freq == 0 or epoch == self.epochs):
if self.model is not None:
# if self.batch_norm:
K.set_learning_phase(0)
pred_val = np.argmax(self.model.predict(self.X_test), axis=1)
K.set_learning_phase(1)
report(self.true_val, pred_val, self.label_set, print_fn=self.print_func)
|
py | 1a494e4e8f558ee848111ecb66ac129a8d72aadc | from wahlprogramme import create_app
from wahlprogramme.database import load_db
# load database
db = load_db("data/", txt=False)
app = create_app(db)
|
py | 1a494eb4208680fb048af45eded11e1a1fd8af14 | from statement import Statement
def main():
feature = "project_creation"
with open(f"./features/{feature}.feature", "r") as file, open(f"{feature}.steps.ts", "w") as outfile:
outfile.write('import { Given, When, Then, TableDefinition } from "cucumber";\n\n\n')
antecessors = []
previous_patterns = {
"Given": [],
"When": [],
"Then": [],
}
for line in file.readlines():
statement = Statement.for_line(line, outfile)
if statement:
statement.write_statement_block(antecessors, previous_patterns)
if __name__ == '__main__':
main()
|
py | 1a494f8a32e4f9549f44e00501e64ae274b18962 | from utils import detector_utils as detector_utils
import cv2
import tensorflow as tf
import multiprocessing
from multiprocessing import Queue, Pool
import time
from utils.detector_utils import WebcamVideoStream
import datetime
import argparse
frame_processed = 0
score_thresh = 0.2
# Create a worker thread that loads graph and
# does detection on images in an input queue and puts it on an output queue
def worker(input_q, output_q, cap_params, frame_processed):
print(">> loading frozen model for worker")
detection_graph, sess = detector_utils.load_inference_graph()
sess = tf.Session(graph=detection_graph)
while True:
#print("> ===== in worker loop, frame ", frame_processed)
frame = input_q.get()
if (frame is not None):
# actual detection
boxes, scores = detector_utils.detect_objects(
frame, detection_graph, sess)
# draw bounding boxes
detector_utils.draw_box_on_image(
cap_params['num_hands_detect'], cap_params["score_thresh"], scores, boxes, cap_params['im_width'], cap_params['im_height'], frame)
# add frame annotated with bounding box to queue
output_q.put(frame)
frame_processed += 1
else:
output_q.put(frame)
sess.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-src', '--source', dest='video_source', type=int,
default=0, help='Device index of the camera.')
parser.add_argument('-nhands', '--num_hands', dest='num_hands', type=int,
default=2, help='Max number of hands to detect.')
parser.add_argument('-fps', '--fps', dest='fps', type=int,
default=1, help='Show FPS on detection/display visualization')
parser.add_argument('-wd', '--width', dest='width', type=int,
default=300, help='Width of the frames in the video stream.')
parser.add_argument('-ht', '--height', dest='height', type=int,
default=200, help='Height of the frames in the video stream.')
parser.add_argument('-ds', '--display', dest='display', type=int,
default=1, help='Display the detected images using OpenCV. This reduces FPS')
parser.add_argument('-num-w', '--num-workers', dest='num_workers', type=int,
default=4, help='Number of workers.')
parser.add_argument('-q-size', '--queue-size', dest='queue_size', type=int,
default=5, help='Size of the queue.')
args = parser.parse_args()
input_q = Queue(maxsize=args.queue_size)
output_q = Queue(maxsize=args.queue_size)
video_capture = WebcamVideoStream(src=args.video_source,
width=args.width,
height=args.height).start()
cap_params = {}
frame_processed = 0
cap_params['im_width'], cap_params['im_height'] = video_capture.size()
cap_params['score_thresh'] = score_thresh
# max number of hands we want to detect/track
cap_params['num_hands_detect'] = args.num_hands
print(cap_params, args)
    # spin up workers to parallelize detection.
pool = Pool(args.num_workers, worker,
(input_q, output_q, cap_params, frame_processed))
start_time = datetime.datetime.now()
num_frames = 0
fps = 0
index = 0
while True:
frame = video_capture.read()
frame = cv2.flip(frame, 1)
index += 1
input_q.put(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
output_frame = output_q.get()
output_frame = cv2.cvtColor(output_frame, cv2.COLOR_RGB2BGR)
elapsed_time = (datetime.datetime.now() -
start_time).total_seconds()
num_frames += 1
fps = num_frames / elapsed_time
# print("frame ", index, num_frames, elapsed_time, fps)
if (output_frame is not None):
if (args.display > 0):
if (args.fps > 0):
detector_utils.draw_fps_on_image(
"FPS : " + str(int(fps)), output_frame)
                cv2.imshow('Multi-threaded Detection', output_frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
else:
if (num_frames == 400):
num_frames = 0
start_time = datetime.datetime.now()
else:
print("frames processed: ", index,
"elapsed time: ", elapsed_time, "fps: ", str(int(fps)))
else:
# print("video end")
break
elapsed_time = (datetime.datetime.now() -
start_time).total_seconds()
fps = num_frames / elapsed_time
print("fps", fps)
pool.terminate()
video_capture.stop()
cv2.destroyAllWindows()
|
py | 1a495026ed8b6f314c3161ea83336be839dd3db0 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.models import User
# Register your models here.
from .models import Article, Comment, Profile, Book, BookTag, BookBlock, BookComment, Movie, Figure, MovieComment, MovieBlock, MovieTag, Picture, Source, Notice, FollowRela, Brand, Genre, Computer, CPU, GPU, Earphone, Phone, Collection, Like
class ProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'media_editor_auth')
list_filter = ('media_editor_auth',)
list_per_page = 10
class ProfileInline(admin.StackedInline):
model = Profile
can_delete = False
verbose_name_plural = 'profile'
filter_horizontal = ('follow', )
class UserAdmin(BaseUserAdmin):
inlines = (ProfileInline, )
class CommentInline(admin.StackedInline):
model = Comment
list_display = ('user',)
list_filter = ('pub_date', 'user')
list_per_page = 10
search_fields = ('user',)
class BookTagInline(admin.StackedInline):
model = BookTag
list_display = ('title',)
list_filter = ('pub_date',)
list_per_page = 10
search_fields = ('title',)
class BookBlockAdmin(admin.ModelAdmin):
list_display = ('title',)
list_per_page = 10
inlines = [BookTagInline, ]
class MovieTagInline(admin.StackedInline):
model = MovieTag
list_display = ('title',)
list_filter = ('pub_date',)
list_per_page = 10
search_fields = ('title',)
class MovieBlockAdmin(admin.ModelAdmin):
list_display = ('title',)
list_per_page = 10
inlines = [MovieTagInline, ]
class ArticleAdmin(admin.ModelAdmin):
list_display = ('id', 'title', 'user', 'status', 'views', 'cover')
list_filter = ('pub_date', 'user')
list_per_page = 10
search_fields = ('user',)
inlines = [CommentInline, ]
class CommentAdmin(admin.ModelAdmin):
list_display = ('user',)
list_filter = ('pub_date', 'user')
list_per_page = 10
search_fields = ('user',)
class MovieAdmin(admin.ModelAdmin):
list_display = ('title', )
list_filter = ('pub_date', )
list_per_page = 10
filter_horizontal = ('actor', 'director', 'writer', 'tag', 'still', 'play_source')
class FigureAdmin(admin.ModelAdmin):
list_display = ('name', )
list_filter = ('name', )
list_per_page = 10
class BookAdmin(admin.ModelAdmin):
list_display = ('title', 'subtitle')
list_filter = ('pub_date', )
list_per_page = 10
filter_horizontal = ('tag', )
class NoticeAdmin(admin.ModelAdmin):
list_display = ('release_date', 'tab')
list_filter = ('tab', )
list_per_page = 10
admin.site.unregister(User)
admin.site.register(User, UserAdmin)
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment, CommentAdmin)
admin.site.register(Book, BookAdmin)
admin.site.register(BookBlock, BookBlockAdmin)
admin.site.register(MovieBlock, MovieBlockAdmin)
admin.site.register(BookTag)
admin.site.register(MovieTag)
admin.site.register(BookComment)
admin.site.register(Movie, MovieAdmin)
admin.site.register(Figure, FigureAdmin)
admin.site.register(MovieComment)
admin.site.register(Profile, ProfileAdmin)
admin.site.register(Picture)
admin.site.register(Source)
admin.site.register(Notice, NoticeAdmin)
admin.site.register(FollowRela)
admin.site.register(Brand)
admin.site.register(Genre)
admin.site.register(Computer)
admin.site.register(CPU)
admin.site.register(GPU)
admin.site.register(Earphone)
admin.site.register(Phone)
admin.site.register(Collection)
admin.site.register(Like) |
py | 1a495218c5d13215971afae32e10a360e2d374cd | import carla
from challenge.data_provider import DataProvider
class AutonomousAgent():
def __init__(self):
# current global plans to reach a destination
        self._topological_plan = None
self._waypoints_plan = None
# this data structure will contain all sensor data
self.data_provider = DataProvider()
# agent's initialization
self.setup()
def setup(self):
"""
Initialize everything needed by your agent.
"""
pass
def sensors_setup(self):
"""
Define the sensor suite required by the agent
:return: a list containing the required sensors in the following format:
[
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor01'],
['sensor.camera.rgb', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll,
'width': width, 'height': height, 'fov': fov}, 'Sensor02'],
['sensor.lidar.ray_cast', {'x':x_rel, 'y': y_rel, 'z': z_rel,
'yaw': yaw, 'pitch': pitch, 'roll': roll}, 'Sensor03']
]
"""
sensors = []
return sensors
    def run_step(self, input_data):
"""
Execute one step of navigation.
:return: control
"""
pass
def __call__(self):
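        # Pull the latest sensor data and delegate one control step to the concrete agent.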
input_data = self.data_provider.get_data()
control = self.run_step(input_data)
control.manual_gear_shift = False
return control
def all_sensors_ready(self):
return self.data_provider.all_sensors_ready()
def set_global_plan(self, topological_plan, waypoints_plan):
        self._topological_plan = topological_plan
self._waypoints_plan = waypoints_plan |
py | 1a4952b0be1c1b84e83d5b3e8967a5a3c66dcdbe | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
pre_meta = MetaData()
post_meta = MetaData()
collections = Table('collections', pre_meta,
Column('id', INTEGER, primary_key=True, nullable=False),
Column('user_id', INTEGER),
Column('collection_id', INTEGER),
)
users = Table('users', post_meta,
Column('id', Integer, primary_key=True, nullable=False),
Column('user_id', Integer),
Column('collection_id', Integer),
)
def upgrade(migrate_engine):
# Upgrade operations go here. Don't create your own engine; bind
# migrate_engine to your metadata
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['collections'].drop()
post_meta.tables['users'].create()
def downgrade(migrate_engine):
# Operations to reverse the above upgrade go here.
pre_meta.bind = migrate_engine
post_meta.bind = migrate_engine
pre_meta.tables['collections'].create()
post_meta.tables['users'].drop()
|
py | 1a49531e600932e6a5b169c03f9c698d16b7b919 | ###############################
#
# Created by Patrik Valkovic
# 3/9/2021
#
###############################
import unittest
import ffeat
class EachArgTest(unittest.TestCase):
def test_oneparam(self):
p = ffeat.flow.EachArg(lambda x: x + 1)
result, kargs = p(8)
self.assertSequenceEqual(result, [9])
def test_moreparams(self):
p = ffeat.flow.EachArg(lambda x: x + 1)
result, kargs = p(8, 12, 19, 20)
self.assertSequenceEqual(result, [9, 13, 20, 21])
if __name__ == '__main__':
unittest.main()
|
py | 1a49534c7eda346c42348b2348c18f6d1e3cbe6f | from math import gcd
A, B = map(int, input().split())
print(A * B // gcd(A, B))
|
py | 1a4953b4ab52a2f2607bb630514a2c57071c03c7 | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .action import BlacklistByIocHash
|
py | 1a4953f8e5fc190cd0eae01aafc91012e10d64b2 | import tensorflow as tf
import os
from niftynet.application.base_application import BaseApplication
from niftynet.engine.application_factory import \
ApplicationNetFactory, InitializerFactory, OptimiserFactory
from niftynet.engine.application_variables import \
CONSOLE, NETWORK_OUTPUT, TF_SUMMARIES
from niftynet.engine.sampler_grid import GridSampler
from niftynet.engine.sampler_resize import ResizeSampler
from niftynet.engine.sampler_uniform import UniformSampler
from niftynet.engine.sampler_weighted import WeightedSampler
from niftynet.engine.sampler_balanced import BalancedSampler
from niftynet.engine.windows_aggregator_grid import GridSamplesAggregator
from niftynet.engine.windows_aggregator_resize import ResizeSamplesAggregator
from niftynet.io.image_reader import ImageReader
from niftynet.layer.binary_masking import BinaryMaskingLayer
from niftynet.layer.discrete_label_normalisation import \
DiscreteLabelNormalisationLayer
from niftynet.layer.histogram_normalisation import \
HistogramNormalisationLayer
from niftynet.layer.loss_segmentation import LossFunction
from niftynet.layer.mean_variance_normalisation import \
MeanVarNormalisationLayer
from niftynet.layer.pad import PadLayer
from niftynet.layer.post_processing import PostProcessingLayer
from niftynet.layer.rand_flip import RandomFlipLayer
from niftynet.layer.rand_rotation import RandomRotationLayer
from niftynet.layer.rand_artefact import RandomArtefactLayer
from niftynet.layer.rand_spatial_scaling import RandomSpatialScalingLayer
from niftynet.evaluation.segmentation_evaluator import SegmentationEvaluator
SUPPORTED_INPUT = set(['image', 'label', 'weight', 'sampler', 'inferred'])
class SegmentationApplication(BaseApplication):
REQUIRED_CONFIG_SECTION = "SEGMENTATION"
def __init__(self, net_param, action_param, action):
super(SegmentationApplication, self).__init__()
tf.logging.info('starting segmentation application')
self.action = action
self.net_param = net_param
self.action_param = action_param
self.data_param = None
self.segmentation_param = None
self.SUPPORTED_SAMPLING = {
'uniform': (self.initialise_uniform_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'weighted': (self.initialise_weighted_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
'resize': (self.initialise_resize_sampler,
self.initialise_resize_sampler,
self.initialise_resize_aggregator),
'balanced': (self.initialise_balanced_sampler,
self.initialise_grid_sampler,
self.initialise_grid_aggregator),
}
def initialise_dataset_loader(
self, data_param=None, task_param=None, data_partitioner=None):
self.data_param = data_param
self.segmentation_param = task_param
file_lists = self.get_file_lists(data_partitioner)
# read each line of csv files into an instance of Subject
if self.is_training:
self.readers = []
for file_list in file_lists:
reader = ImageReader({'image', 'label', 'weight', 'sampler'})
reader.initialise(data_param, task_param, file_list)
self.readers.append(reader)
elif self.is_inference:
# in the inference process use image input only
inference_reader = ImageReader({'image'})
file_list = data_partitioner.inference_files
inference_reader.initialise(data_param, task_param, file_list)
self.readers = [inference_reader]
elif self.is_evaluation:
file_list = data_partitioner.inference_files
reader = ImageReader({'image', 'label', 'inferred'})
reader.initialise(data_param, task_param, file_list)
self.readers = [reader]
else:
raise ValueError('Action `{}` not supported. Expected one of {}'
.format(self.action, self.SUPPORTED_ACTIONS))
foreground_masking_layer = None
if self.net_param.normalise_foreground_only:
foreground_masking_layer = BinaryMaskingLayer(
type_str=self.net_param.foreground_type,
multimod_fusion=self.net_param.multimod_foreground_type,
threshold=0.0)
mean_var_normaliser = MeanVarNormalisationLayer(
image_name='image', binary_masking_func=foreground_masking_layer)
histogram_normaliser = None
if self.net_param.histogram_ref_file:
histogram_normaliser = HistogramNormalisationLayer(
image_name='image',
modalities=vars(task_param).get('image'),
model_filename=self.net_param.histogram_ref_file,
binary_masking_func=foreground_masking_layer,
norm_type=self.net_param.norm_type,
cutoff=self.net_param.cutoff,
name='hist_norm_layer')
label_normalisers = None
if self.net_param.histogram_ref_file and \
task_param.label_normalisation:
label_normalisers = [DiscreteLabelNormalisationLayer(
image_name='label',
modalities=vars(task_param).get('label'),
model_filename=self.net_param.histogram_ref_file)]
if self.is_evaluation:
label_normalisers.append(
DiscreteLabelNormalisationLayer(
image_name='inferred',
modalities=vars(task_param).get('inferred'),
model_filename=self.net_param.histogram_ref_file))
label_normalisers[-1].key = label_normalisers[0].key
normalisation_layers = []
if self.net_param.normalisation:
normalisation_layers.append(histogram_normaliser)
if self.net_param.whitening:
normalisation_layers.append(mean_var_normaliser)
if task_param.label_normalisation and \
(self.is_training or not task_param.output_prob):
normalisation_layers.extend(label_normalisers)
augmentation_layers = []
if self.is_training:
if self.action_param.random_artefact != -1:
augmentation_layers.append(RandomArtefactLayer())
if self.action_param.random_flipping_axes != -1:
augmentation_layers.append(RandomFlipLayer(flip_axes=self.action_param.random_flipping_axes))
if self.action_param.scaling_percentage:
augmentation_layers.append(RandomSpatialScalingLayer(
min_percentage=self.action_param.scaling_percentage[0],
max_percentage=self.action_param.scaling_percentage[1]))
if self.action_param.rotation_angle or \
self.action_param.rotation_angle_x or \
self.action_param.rotation_angle_y or \
self.action_param.rotation_angle_z:
rotation_layer = RandomRotationLayer()
if self.action_param.rotation_angle:
rotation_layer.init_uniform_angle(
self.action_param.rotation_angle)
else:
rotation_layer.init_non_uniform_angle(
self.action_param.rotation_angle_x,
self.action_param.rotation_angle_y,
self.action_param.rotation_angle_z)
augmentation_layers.append(rotation_layer)
volume_padding_layer = []
if self.net_param.volume_padding_size:
volume_padding_layer.append(PadLayer(
image_name=SUPPORTED_INPUT,
border=self.net_param.volume_padding_size))
for reader in self.readers:
reader.add_preprocessing_layers(
volume_padding_layer +
normalisation_layers +
augmentation_layers)
def initialise_uniform_sampler(self):
self.sampler = [[UniformSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_weighted_sampler(self):
self.sampler = [[WeightedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_resize_sampler(self):
self.sampler = [[ResizeSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
shuffle_buffer=self.is_training,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_sampler(self):
self.sampler = [[GridSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
spatial_window_size=self.action_param.spatial_window_size,
window_border=self.action_param.border,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_balanced_sampler(self):
self.sampler = [[BalancedSampler(
reader=reader,
data_param=self.data_param,
batch_size=self.net_param.batch_size,
windows_per_image=self.action_param.sample_per_volume,
queue_length=self.net_param.queue_length) for reader in
self.readers]]
def initialise_grid_aggregator(self):
self.output_decoder = GridSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_resize_aggregator(self):
self.output_decoder = ResizeSamplesAggregator(
image_reader=self.readers[0],
output_path=self.action_param.save_seg_dir,
window_border=self.action_param.border,
interp_order=self.action_param.output_interp_order)
def initialise_sampler(self):
if self.is_training:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][0]()
elif self.is_inference:
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][1]()
def initialise_aggregator(self):
self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]()
def initialise_network(self):
w_regularizer = None
b_regularizer = None
reg_type = self.net_param.reg_type.lower()
decay = self.net_param.decay
if reg_type == 'l2' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l2_regularizer(decay)
b_regularizer = regularizers.l2_regularizer(decay)
elif reg_type == 'l1' and decay > 0:
from tensorflow.contrib.layers.python.layers import regularizers
w_regularizer = regularizers.l1_regularizer(decay)
b_regularizer = regularizers.l1_regularizer(decay)
self.net = ApplicationNetFactory.create(self.net_param.name)(
num_classes=self.segmentation_param.num_classes,
w_initializer=InitializerFactory.get_initializer(
name=self.net_param.weight_initializer),
b_initializer=InitializerFactory.get_initializer(
name=self.net_param.bias_initializer),
w_regularizer=w_regularizer,
b_regularizer=b_regularizer,
acti_func=self.net_param.activation_function)
def connect_data_and_network(self,
outputs_collector=None,
gradients_collector=None):
# def data_net(for_training):
# with tf.name_scope('train' if for_training else 'validation'):
# sampler = self.get_sampler()[0][0 if for_training else -1]
# data_dict = sampler.pop_batch_op()
# image = tf.cast(data_dict['image'], tf.float32)
# return data_dict, self.net(image, is_training=for_training)
def switch_sampler(for_training):
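            # Pop the next batch from either the training or the validation sampler.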
with tf.name_scope('train' if for_training else 'validation'):
sampler = self.get_sampler()[0][0 if for_training else -1]
return sampler.pop_batch_op()
if self.is_training:
# if self.action_param.validation_every_n > 0:
# data_dict, net_out = tf.cond(tf.logical_not(self.is_validation),
# lambda: data_net(True),
# lambda: data_net(False))
# else:
# data_dict, net_out = data_net(True)
if self.action_param.validation_every_n > 0:
data_dict = tf.cond(tf.logical_not(self.is_validation),
lambda: switch_sampler(for_training=True),
lambda: switch_sampler(for_training=False))
else:
data_dict = switch_sampler(for_training=True)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=self.is_training)
with tf.name_scope('Optimiser'):
optimiser_class = OptimiserFactory.create(
name=self.action_param.optimiser)
self.optimiser = optimiser_class.get_instance(
learning_rate=self.action_param.lr)
loss_func = LossFunction(
n_class=self.segmentation_param.num_classes,
loss_type=self.action_param.loss_type,
softmax=self.segmentation_param.softmax)
data_loss = loss_func(
prediction=net_out,
ground_truth=data_dict.get('label', None),
weight_map=data_dict.get('weight', None))
reg_losses = tf.get_collection(
tf.GraphKeys.REGULARIZATION_LOSSES)
if self.net_param.decay > 0.0 and reg_losses:
reg_loss = tf.reduce_mean(
[tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
loss = data_loss + reg_loss
else:
loss = data_loss
grads = self.optimiser.compute_gradients(loss)
# collecting gradients variables
gradients_collector.add_to_collection([grads])
# collecting output variables
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=False, collection=CONSOLE)
outputs_collector.add_to_collection(
var=data_loss, name='loss',
average_over_devices=True, summary_type='scalar',
collection=TF_SUMMARIES)
# outputs_collector.add_to_collection(
# var=image*180.0, name='image',
# average_over_devices=False, summary_type='image3_sagittal',
# collection=TF_SUMMARIES)
# outputs_collector.add_to_collection(
# var=image, name='image',
# average_over_devices=False,
# collection=NETWORK_OUTPUT)
# outputs_collector.add_to_collection(
# var=tf.reduce_mean(image), name='mean_image',
# average_over_devices=False, summary_type='scalar',
# collection=CONSOLE)
elif self.is_inference:
# converting logits into final output for
# classification probabilities or argmax classification labels
data_dict = switch_sampler(for_training=False)
image = tf.cast(data_dict['image'], tf.float32)
net_out = self.net(image, is_training=True)
output_prob = self.segmentation_param.output_prob
num_classes = self.segmentation_param.num_classes
if output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'SOFTMAX', num_classes=num_classes)
elif not output_prob and num_classes > 1:
post_process_layer = PostProcessingLayer(
'ARGMAX', num_classes=num_classes)
else:
post_process_layer = PostProcessingLayer(
'IDENTITY', num_classes=num_classes)
net_out = post_process_layer(net_out)
outputs_collector.add_to_collection(
var=net_out, name='window',
average_over_devices=False, collection=NETWORK_OUTPUT)
outputs_collector.add_to_collection(
var=data_dict['image_location'], name='location',
average_over_devices=False, collection=NETWORK_OUTPUT)
self.initialise_aggregator()
def interpret_output(self, batch_output):
if self.is_inference:
return self.output_decoder.decode_batch(
batch_output['window'], batch_output['location'])
return True
def initialise_evaluator(self, eval_param):
self.eval_param = eval_param
self.evaluator = SegmentationEvaluator(self.readers[0],
self.segmentation_param,
eval_param)
def add_inferred_output(self, data_param, task_param):
return self.add_inferred_output_like(data_param, task_param, 'label')
|
py | 1a49550594c0933a1cd34b31d99fc274e084ab6c | # Solution of;
# Project Euler Problem 439: Sum of sum of divisors
# https://projecteuler.net/problem=439
#
# Let d(k) be the sum of all divisors of k. We define the function S(N) =
# $\sum_{i=1}^N \sum_{j=1}^N d(i \cdot j)$. For example, S(3) = d(1) + d(2) +
# d(3) + d(2) + d(4) + d(6) + d(3) + d(6) + d(9) = 59. You are given that
# S(10^3) = 563576517282 and S(10^5) mod 10^9 = 215766508. Find S(10^11) mod 10^9.
#
# by lcsm29 http://github.com/lcsm29/project-euler
import timed
def dummy(n):
pass
if __name__ == '__main__':
n = 1000
i = 10000
prob_id = 439
timed.caller(dummy, n, i, prob_id)
|
py | 1a49551ad29ab9043a6597ac03175bc472cd3b37 | import subprocess
import paho.mqtt.client as mqtt
import json
import time
class Control:
def __init__(self):
self.Fname = "fname"
self.Fhash = "fhash"
self.Chash = "chash"
self.Runner = set()
self.JobID = "JobID"
def Publish(self, target, channel, message):
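        # One-shot MQTT publish with QoS 1; waits until the broker confirms delivery before disconnecting.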
client = mqtt.Client()
client.max_inflight_messages_set(200000)
client.connect(target, 1883)
client.loop_start()
msg_info = client.publish(channel, message, qos=1)
if msg_info.is_published() == False:
msg_info.wait_for_publish()
client.disconnect()
def DataUpload(self):
# ClusterSpec Upload
cmd = "timeout 10 ipfs add ClusterSpec.json"
output = subprocess.check_output(cmd, shell=True)
tmp = output.split(" ")
self.Fhash = tmp[1]
self.Fname = tmp[2].replace("\n","")
# Create_Worker Upload
cmd = "timeout 10 ipfs add create_worker.py"
output = subprocess.check_output(cmd, shell=True)
self.Chash = output.split(" ")[1]
def GetSwarm(self):
Rset = set()
cmd = "ipfs swarm peers"
output = subprocess.check_output(cmd, shell=True)
tmp = output.split("\n")
for x in tmp:
if x=="":continue
Rset.add(x.split("/")[2])
return Rset
def SetKRunner(self,K):
# How to choose K good machine...
# To be continued...
Rset = set()
cmd = "ipfs swarm peers"
output = subprocess.check_output(cmd, shell=True)
tmp = output.split("\n")
for i in range(len(tmp)):
if i >= K or tmp[i]=="": break
tmpp = tmp[i].split("/")
self.Runner.add((tmpp[2],tmpp[len(tmpp)-1],i)) # format : tuple(IP, NodeID, RunnerID)
print self.Runner
def SetClusterSpec(self):
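        # Reads ClusterSpec.conf, maps every requested task onto a swarm peer (port 2222),
        # then writes ClusterSpec.json and a create_worker.py bootstrap script.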
f = open('ClusterSpec.conf','r')
Jspec = ""
while True:
line = f.readline()
if not line:
break
try:
Jspec = json.loads(line)
break
except:
print "Bad ClusterSpec.conf"
exit(0)
Wcnt = 0 # worker counter
for x in Jspec:
Wcnt += len(Jspec[x])
self.SetKRunner(Wcnt) ##############################################
RunnerList = list()
for x in self.Runner:
RunnerList.append(x[0]+":2222")
Rcnt = 0 # Runner counter
TaskIndex = dict()
RealSpec = dict()
for x in Jspec:
TmpList = list()
for y in Jspec[x]:
TmpList.append(RunnerList[Rcnt])
TaskIndex[RunnerList[Rcnt]] = y # set index to runner
Rcnt += 1
RealSpec[x] = TmpList
print json.dumps(Jspec)
print json.dumps(RealSpec)
OutputConf = dict()
OutputConf["ClusterSpec"] = RealSpec
OutputConf["TaskIndex"] = TaskIndex
fw = open('ClusterSpec.json','w')
fw.write(json.dumps(OutputConf))
fw.close()
# default setting
DefaultJname = ""
for x in Jspec:
DefaultJname = x
break
fw = open('create_worker.py','w')
fw.write("import sys\n")
fw.write("task_number = int(sys.argv[1])\n")
fw.write("import tensorflow as tf\n")
fw.write("cluster = tf.train.ClusterSpec("+json.dumps(RealSpec)+")\n")
fw.write("# You can write yourself below.\n")
fw.write("server = tf.train.Server(cluster, job_name=\""+DefaultJname+"\", task_index=task_number)\n")
fw.write("server.start()\n")
fw.write("server.join()\n")
fw.close()
def CallDownload(self):
if self.Fhash == "fhash" or self.Chash == "chash":
print "PLEASE UPLOAD FIRST"
return
if len(self.Runner) != 0:
for x in self.Runner:
self.Publish(x[0],"Download",self.Fhash+"###"+self.Fname)
self.Publish(x[0],"Download",self.Chash+"###ClusterSpec.json")
else:
f = open("ClusterSpec.json",'r')
while True:
line = f.readline()
if not line:
break
try:
Jline = json.loads(line)
Tkey = Jline["TaskIndex"].keys()
for x in Tkey:
RemoteIP = x.split(":")[0]
self.Publish(RemoteIP,"Download",self.Fhash+"###"+self.Fname)
self.Publish(RemoteIP,"Download",self.Chash+"###create_worker.py")
self.Publish(RemoteIP,"RunCluster",Jline["TaskIndex"][x])
print Jline["TaskIndex"][x]
except:
print "No Good ClusterSpec.json"
exit(0)
def CloseCluster(self):
if len(self.Runner) != 0:
for x in self.Runner:
self.Publish(x[0],"CloseCluster","KEVIN")
else:
f = open("ClusterSpec.json",'r')
while True:
line = f.readline()
if not line:
break
try:
Jline = json.loads(line)
Tkey = Jline["TaskIndex"].keys()
for x in Tkey:
RemoteIP = x.split(":")[0]
self.Publish(RemoteIP,"CloseCluster","KEVIN")
except:
print "No Good ClusterSpec.json"
exit(0)
|
py | 1a4955bbf5c952b0857be53de74d2ef68fe601d1 | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from components.base_component import BaseComponent
from equipment_types import EquipmentType
if TYPE_CHECKING:
from entity import Actor, Item
class Equipment(BaseComponent):
parent: Actor
def __init__(self, weapon: Optional[Item] = None, armor: Optional[Item] = None):
self.weapon = weapon
self.armor = armor
@property
def defense_bonus(self) -> int:
bonus = 0
if self.weapon is not None and self.weapon.equipable is not None:
bonus += self.weapon.equipable.defense_bonus
if self.armor is not None and self.armor.equipable is not None:
bonus += self.armor.equipable.defense_bonus
return bonus
@property
def power_bonus(self) -> int:
bonus = 0
if self.weapon is not None and self.weapon.equipable is not None:
bonus += self.weapon.equipable.power_bonus
if self.armor is not None and self.armor.equipable is not None:
bonus += self.armor.equipable.power_bonus
return bonus
def item_is_equipped(self, item: Item) -> bool:
return self.weapon == item or self.armor == item
def unequip_message(self, item_name: str) -> None:
self.parent.gamemap.engine.message_log.add_message(
f"You remove the {item_name}."
)
def equip_message(self, item_name: str) -> None:
self.parent.gamemap.engine.message_log.add_message(
f"You gear up and equip the {item_name}."
)
def equip_to_slot(self, slot: str, item: Item, add_message: bool) -> None:
current_item = getattr(self, slot)
if current_item is not None:
self.unequip_from_slot(slot, add_message)
setattr(self, slot, item)
if add_message:
self.equip_message(item.name)
def unequip_from_slot(self, slot: str, add_message: bool) -> None:
current_item = getattr(self, slot)
if add_message:
self.unequip_message(current_item.name)
setattr(self, slot, None)
def toggle_equip(self, equipable_item: Item, add_message: bool = True) -> None:
if (
equipable_item.equipable
and equipable_item.equipable.equipment_type == EquipmentType.WEAPON
):
slot = "weapon"
else:
slot = "armor"
if getattr(self, slot) == equipable_item:
self.unequip_from_slot(slot, add_message)
else:
self.equip_to_slot(slot, equipable_item, add_message)
|
py | 1a4955c5bc081c940356cb287e14deb835654aae | #!/usr/bin/env python3
import ecc_ed25519
import sys, getopt
from cryptography.hazmat.primitives.asymmetric import ed25519
msg = ""
public_key_hex = ""
encoded_signature = ""
try:
opts, args = getopt.getopt(sys.argv[1:],"hm:k:s:",["message=","publickeyhex=", "signature="])
except getopt.GetoptError:
print('verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print('verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit()
elif opt in ("-m", "--message"):
msg = arg
msg_as_bytes = str.encode(msg)
elif opt in ("-k", "--publickeyhex"):
public_key_hex = arg
# Get rid of the prefix 01
public_bytes_from_hex = bytes.fromhex(public_key_hex[2:])
loaded_public_key = ed25519.Ed25519PublicKey.from_public_bytes(public_bytes_from_hex)
elif opt in ("-s", "--signature"):
encoded_signature = arg
signature = bytes.fromhex(encoded_signature)
if msg == "" or encoded_signature == "":
print("Message and signature are required!")
print('./verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE')
sys.exit()
# Read the public_key_hex from default location if not given as param
if public_key_hex == "":
public_key_hex_location = "/etc/casper/validator_keys/public_key_hex"
try:
with open(public_key_hex_location, 'r') as fstream:
            public_key_hex = fstream.readlines()[0].strip()
# Get rid of the prefix
public_bytes_from_hex = bytes.fromhex(public_key_hex[2:])
loaded_public_key = ed25519.Ed25519PublicKey.from_public_bytes(public_bytes_from_hex)
except:
print("ERROR: Couldn't access your public key hex at this location: ", public_key_hex_location)
print("Please make sure your public_key_hex file is at the given location and is accessible by the current user.")
print("You can also directly provide your public key as an input parameter.")
print("USAGE: verify.py -m YOURMESSAGE -k PUBLIC-KEY-HEX -s SIGNATURE")
sys.exit()
print("Public Key:\n", public_key_hex)
print("Message:\n", msg)
print("Signature:\n", encoded_signature)
# Verify
try:
loaded_public_key.verify(signature, msg_as_bytes)
print("Verified!")
except:
print("Verification failed!") |
py | 1a495603b19e7747574353ea7210387de48c7fe4 | # -*- coding: utf-8 -*-
# @Author: Jie
# @Date: 2017-06-15 14:23:06
# @Last Modified by: Jie Yang, Contact: [email protected]
# @Last Modified time: 2017-12-14 13:43:42
import sys
import numpy as np
from alphabet import Alphabet
def normalize_word(word):
new_word = ""
for char in word:
if char.isdigit():
new_word += '0'
else:
new_word += char
return new_word
def read_instance(input_file, word_alphabet, char_alphabet, label_alphabet, number_normalized, max_sent_length, char_padding_size=-1, char_padding_symbol = '</pad>'):
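    # Parses a CoNLL-style file (one token and its label per line, blank line between
    # sentences) into parallel text and index lists for words, characters and labels.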
in_lines = open(input_file,'r').readlines()
instence_texts = []
instence_Ids = []
words = []
chars = []
labels = []
word_Ids = []
char_Ids = []
label_Ids = []
for line in in_lines:
if len(line) > 2:
pairs = line.strip().split()
word = pairs[0].decode('utf-8')
if number_normalized:
word = normalize_word(word)
label = pairs[-1]
words.append(word)
labels.append(label)
word_Ids.append(word_alphabet.get_index(word))
label_Ids.append(label_alphabet.get_index(label))
char_list = []
char_Id = []
for char in word:
char_list.append(char)
if char_padding_size > 0:
char_number = len(char_list)
if char_number < char_padding_size:
char_list = char_list + [char_padding_symbol]*(char_padding_size-char_number)
assert(len(char_list) == char_padding_size)
else:
### not padding
pass
for char in char_list:
char_Id.append(char_alphabet.get_index(char))
chars.append(char_list)
char_Ids.append(char_Id)
else:
if (max_sent_length < 0) or (len(words) < max_sent_length):
instence_texts.append([words, chars, labels])
instence_Ids.append([word_Ids, char_Ids,label_Ids])
words = []
chars = []
labels = []
word_Ids = []
char_Ids = []
label_Ids = []
return instence_texts, instence_Ids
def build_pretrain_embedding(embedding_path, word_alphabet, embedd_dim=100, norm=True):
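    # Looks each vocabulary word up in the pretrained embeddings (exact match first,
    # then lowercased); out-of-vocabulary words get a uniform random vector.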
embedd_dict = dict()
if embedding_path != None:
embedd_dict, embedd_dim = load_pretrain_emb(embedding_path)
alphabet_size = word_alphabet.size()
scale = np.sqrt(3.0 / embedd_dim)
pretrain_emb = np.empty([word_alphabet.size(), embedd_dim])
perfect_match = 0
case_match = 0
not_match = 0
for word, index in word_alphabet.iteritems():
if word in embedd_dict:
if norm:
pretrain_emb[index,:] = norm2one(embedd_dict[word])
else:
pretrain_emb[index,:] = embedd_dict[word]
perfect_match += 1
elif word.lower() in embedd_dict:
if norm:
pretrain_emb[index,:] = norm2one(embedd_dict[word.lower()])
else:
pretrain_emb[index,:] = embedd_dict[word.lower()]
case_match += 1
else:
pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedd_dim])
not_match += 1
pretrained_size = len(embedd_dict)
print("Embedding:\n pretrain word:%s, prefect match:%s, case_match:%s, oov:%s, oov%%:%s"%(pretrained_size, perfect_match, case_match, not_match, (not_match+0.)/alphabet_size))
return pretrain_emb, embedd_dim
def norm2one(vec):
root_sum_square = np.sqrt(np.sum(np.square(vec)))
return vec/root_sum_square
def load_pretrain_emb(embedding_path):
embedd_dim = -1
embedd_dict = dict()
with open(embedding_path, 'r') as file:
for line in file:
line = line.strip()
if len(line) == 0:
continue
tokens = line.split()
if embedd_dim < 0:
embedd_dim = len(tokens) - 1
else:
assert (embedd_dim + 1 == len(tokens))
embedd = np.empty([1, embedd_dim])
embedd[:] = tokens[1:]
embedd_dict[tokens[0].decode('utf-8')] = embedd
return embedd_dict, embedd_dim
if __name__ == '__main__':
a = np.arange(9.0)
print a
print norm2one(a)
|
py | 1a4956dd7a07bdd2e9579871fc7d476385b0c7b8 | from django.shortcuts import redirect
from django.http import HttpResponse
from .models import Link
def index(request):
return HttpResponse("Hello, world. You're at the polls index.")
def openLink(request, temp):
redirectLink = Link.objects.get(name=temp)
link = redirectLink.redirect
print(link)
return redirect(link)
|
py | 1a49586496dca9ee46ac7abc51c3d6721783e8fc | # _*_ coding: utf-8 _*_
from sqlalchemy import Column
from ..extension import db
from .constants import USER_ADMIN, USER_NORMAL
from werkzeug import generate_password_hash, check_password_hash
from flask_login import UserMixin
class User(db.Model, UserMixin):
__tablename__ = 'users'
id = Column(db.Integer, primary_key=True)
name = Column(db.String(128), nullable=False, unique=True)
    # Avoid storing the user's plaintext password
_password = Column('password', db.String(256), nullable=False)
def _get_password(self):
return self._password
def _set_password(self, password):
self._password = generate_password_hash(password)
password = db.synonym('_password', descriptor=property(_get_password, _set_password))
def check_password(self, password):
if self.password is None:
return False
return check_password_hash(self.password, password)
type_code = Column(db.SmallInteger, default=USER_NORMAL)
def is_admin(self):
return self.type_code == USER_ADMIN
@classmethod
def authenticate(cls, login, password):
user = cls.query.filter(User.name == login).first()
if user:
authenticated = user.check_password(password)
else:
authenticated = False
return user, authenticated
|
py | 1a49586b06836d1b89a8c0de717517317e04860b | import gym
from gym import spaces
import numpy as np
# from os import path
import snakeoil3_gym as snakeoil3
import numpy as np
import copy
import collections as col
import os
import time
import sys
class TorcsEnv:
    terminal_judge_start = 50  # originally 1000; episode terminates if there is still no progress after this many timesteps
termination_limit_progress = 5 # [km/h], episode terminates if car is running slower than this limit
default_speed = 50
initial_reset = True
def __init__(self, vision=False, throttle=False, gear_change=False):
self.vision = vision
self.throttle = throttle
self.gear_change = gear_change
self.initial_run = True
##print("launch torcs")
os.system('pkill torcs')
time.sleep(0.5)
if self.vision is True:
os.system('torcs -nofuel -nodamage -nolaptime -vision &')
else:
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
"""
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=self.vision) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
"""
if throttle is False:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(1,))
else:
self.action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,))
if vision is False:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf])
self.observation_space = spaces.Box(low=low, high=high)
else:
high = np.array([1., np.inf, np.inf, np.inf, 1., np.inf, 1., np.inf, 255])
low = np.array([0., -np.inf, -np.inf, -np.inf, 0., -np.inf, 0., -np.inf, 0])
self.observation_space = spaces.Box(low=low, high=high)
def step(self, u):
#print("Step")
# convert thisAction to the actual torcs actionstr
client = self.client
this_action = self.agent_to_torcs(u)
# Apply Action
action_torcs = client.R.d
# Steering
action_torcs['steer'] = this_action['steer'] # in [-1, 1]
        #  Simple Automatic Throttle Control by Snakeoil
if self.throttle is False:
print("KAUSHIK: SHOULD NOT BE HERE! ")
sys.exit()
target_speed = self.default_speed
if client.S.d['speedX'] < target_speed - (client.R.d['steer']*50):
client.R.d['accel'] += .01
else:
client.R.d['accel'] -= .01
if client.R.d['accel'] > 0.2:
client.R.d['accel'] = 0.2
if client.S.d['speedX'] < 10:
client.R.d['accel'] += 1/(client.S.d['speedX']+.1)
# Traction Control System
if ((client.S.d['wheelSpinVel'][2]+client.S.d['wheelSpinVel'][3]) -
(client.S.d['wheelSpinVel'][0]+client.S.d['wheelSpinVel'][1]) > 5):
action_torcs['accel'] -= .2
else:
action_torcs['accel'] = this_action['accel']
action_torcs['brake'] = this_action['brake']
# Automatic Gear Change by Snakeoil
if self.gear_change is True:
action_torcs['gear'] = this_action['gear']
else:
# Automatic Gear Change by Snakeoil is possible
action_torcs['gear'] = 1
if self.throttle:
if client.S.d['speedX'] > 50:
action_torcs['gear'] = 2
if client.S.d['speedX'] > 80:
action_torcs['gear'] = 3
if client.S.d['speedX'] > 110:
action_torcs['gear'] = 4
if client.S.d['speedX'] > 140:
action_torcs['gear'] = 5
if client.S.d['speedX'] > 170:
action_torcs['gear'] = 6
        # Save the previous full-obs from torcs for the reward calculation
obs_pre = copy.deepcopy(client.S.d)
# One-Step Dynamics Update #################################
# Apply the Agent's action into torcs
client.respond_to_server()
# Get the response of TORCS
client.get_servers_input()
# Get the current full-observation from torcs
obs = client.S.d
        # Make an observation from a raw observation vector from TORCS
self.observation = self.make_observaton(obs)
# Reward setting Here #######################################
# direction-dependent positive reward
track = np.array(obs['track'])
trackPos = np.array(obs['trackPos'])
sp = np.array(obs['speedX'])
damage = np.array(obs['damage'])
rpm = np.array(obs['rpm'])
progress = sp*np.cos(obs['angle']) - np.abs(sp*np.sin(obs['angle'])) - sp * np.abs(obs['trackPos'])
reward = progress
# collision detection
if obs['damage'] - obs_pre['damage'] > 0:
reward = -1
# Termination judgement #########################
episode_terminate = False
#---------------------------------------------------
if (abs(track.any()) > 1 or abs(trackPos) > 1): # Episode is terminated if the car is out of track
print("Out of track ")
reward = -100 #-200
episode_terminate = True
client.R.d['meta'] = True
if self.terminal_judge_start < self.time_step: # Episode terminates if the progress of agent is small
if progress < self.termination_limit_progress:
print("No progress", progress)
reward = -100 # KAUSHIK ADDED THIS
episode_terminate = True
client.R.d['meta'] = True
#---------------------------------------------------
if np.cos(obs['angle']) < 0: # Episode is terminated if the agent runs backward
episode_terminate = True
client.R.d['meta'] = True
if client.R.d['meta'] is True: # Send a reset signal
self.initial_run = False
client.respond_to_server()
self.time_step += 1
return self.get_obs(), reward, client.R.d['meta'], {}
def reset(self, relaunch=False):
#print("Reset")
self.time_step = 0
if self.initial_reset is not True:
self.client.R.d['meta'] = True
self.client.respond_to_server()
        ## TENTATIVE. Restarting TORCS every episode suffers from the memory leak bug!
if relaunch is True:
self.reset_torcs()
print("### TORCS is RELAUNCHED ###")
# Modify here if you use multiple tracks in the environment
self.client = snakeoil3.Client(p=3101, vision=self.vision) # Open new UDP in vtorcs
self.client.MAX_STEPS = np.inf
client = self.client
client.get_servers_input() # Get the initial input from torcs
obs = client.S.d # Get the current full-observation from torcs
self.observation = self.make_observaton(obs)
self.last_u = None
self.initial_reset = False
return self.get_obs()
def end(self):
os.system('pkill torcs')
def get_obs(self):
return self.observation
def reset_torcs(self):
#print("relaunch torcs")
os.system('pkill torcs')
time.sleep(0.5)
if self.vision is True:
os.system('torcs -nofuel -nodamage -nolaptime -vision &')
else:
os.system('torcs -nofuel -nolaptime &')
time.sleep(0.5)
os.system('sh autostart.sh')
time.sleep(0.5)
def agent_to_torcs(self, u):
torcs_action = {'steer': u[0]}
if self.throttle is True: # throttle action is enabled
torcs_action.update({'accel': u[1]})
torcs_action.update({'brake': u[2]})
if self.gear_change is True: # gear change action is enabled
torcs_action.update({'gear': int(u[3])})
return torcs_action
def obs_vision_to_image_rgb(self, obs_image_vec):
image_vec = obs_image_vec
r = image_vec[0:len(image_vec):3]
g = image_vec[1:len(image_vec):3]
b = image_vec[2:len(image_vec):3]
sz = (64, 64)
r = np.array(r).reshape(sz)
g = np.array(g).reshape(sz)
b = np.array(b).reshape(sz)
return np.array([r, g, b], dtype=np.uint8)
def make_observaton(self, raw_obs):
if self.vision is False:
names = ['focus',
'speedX', 'speedY', 'speedZ', 'angle', 'damage',
'opponents',
'rpm',
'track',
'trackPos',
'wheelSpinVel']
            Observation = col.namedtuple('Observation', names)
return Observation(focus=np.array(raw_obs['focus'], dtype=np.float32)/200.,
speedX=np.array(raw_obs['speedX'], dtype=np.float32)/300.0,
speedY=np.array(raw_obs['speedY'], dtype=np.float32)/300.0,
speedZ=np.array(raw_obs['speedZ'], dtype=np.float32)/300.0,
angle=np.array(raw_obs['angle'], dtype=np.float32)/3.1416,
damage=np.array(raw_obs['damage'], dtype=np.float32),
opponents=np.array(raw_obs['opponents'], dtype=np.float32)/200.,
rpm=np.array(raw_obs['rpm'], dtype=np.float32)/10000,
track=np.array(raw_obs['track'], dtype=np.float32)/200.,
trackPos=np.array(raw_obs['trackPos'], dtype=np.float32)/1.,
wheelSpinVel=np.array(raw_obs['wheelSpinVel'], dtype=np.float32))
else:
names = ['focus',
'speedX', 'speedY', 'speedZ', 'angle',
'opponents',
'rpm',
'track',
'trackPos',
'wheelSpinVel',
'img']
            Observation = col.namedtuple('Observation', names)
# Get RGB from observation
#image_rgb = self.obs_vision_to_image_rgb(raw_obs['img']) # KAUSHIK ADDED THIS
            image_rgb = self.obs_vision_to_image_rgb(raw_obs['img'])
            return Observation(focus=np.array(raw_obs['focus'], dtype=np.float32)/200.,
                               speedX=np.array(raw_obs['speedX'], dtype=np.float32)/self.default_speed,
                               speedY=np.array(raw_obs['speedY'], dtype=np.float32)/self.default_speed,
                               speedZ=np.array(raw_obs['speedZ'], dtype=np.float32)/self.default_speed,
                               angle=np.array(raw_obs['angle'], dtype=np.float32)/3.1416,
                               opponents=np.array(raw_obs['opponents'], dtype=np.float32)/200.,
rpm=np.array(raw_obs['rpm'], dtype=np.float32),
track=np.array(raw_obs['track'], dtype=np.float32)/200.,
trackPos=np.array(raw_obs['trackPos'], dtype=np.float32)/1.,
wheelSpinVel=np.array(raw_obs['wheelSpinVel'], dtype=np.float32),
img=image_rgb)
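if __name__ == '__main__':
    # Illustrative sketch only (not from the original code): a minimal interaction
    # loop with the TorcsEnv defined above. Running it really launches TORCS; the
    # action values and episode length are placeholders chosen for the example.
    env = TorcsEnv(vision=False, throttle=True, gear_change=False)
    ob = env.reset(relaunch=True)
    for _ in range(10):
        action = np.array([0.0, 0.1, 0.0])   # [steer, accel, brake]
        ob, reward, done, _info = env.step(action)
        if done:
            break
    env.end()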
|
py | 1a4958ec06d8c1fd1f091362d2c5a22714ed0714 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
DETR Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
# TransformerEncoderLayerThin
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
# TransformerDecoderLayerThin
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm,
return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, mask, query_embed, pos_embed):
"""
Args:
src: (batch_size, L, d)
mask: (batch_size, L)
query_embed: (#queries, d)
pos_embed: (batch_size, L, d) the same as src
Returns:
"""
# flatten NxCxHxW to HWxNxC
bs, l, d = src.shape
src = src.permute(1, 0, 2) # (L, batch_size, d)
pos_embed = pos_embed.permute(1, 0, 2) # (L, batch_size, d)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1) # (#queries, batch_size, d)
tgt = torch.zeros_like(query_embed)
memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed) # (L, batch_size, d)
hs = self.decoder(tgt, memory, memory_key_padding_mask=mask,
pos=pos_embed, query_pos=query_embed) # (#layers, #queries, batch_size, d)
        hs = hs.transpose(1, 2)  # (#layers, batch_size, #queries, d)
# memory = memory.permute(1, 2, 0) # (batch_size, d, L)
memory = memory.transpose(0, 1) # (batch_size, L, d)
return hs, memory
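def _transformer_shape_example():
    """Illustrative sketch, not part of the original DETR code: exercises the shapes
    documented in Transformer.forward with small toy dimensions."""
    bs, seq_len, d, n_queries, n_dec_layers = 2, 7, 256, 5, 2
    model = Transformer(d_model=d, nhead=8, num_encoder_layers=2,
                        num_decoder_layers=n_dec_layers, return_intermediate_dec=True)
    src = torch.randn(bs, seq_len, d)
    mask = torch.zeros(bs, seq_len, dtype=torch.bool)      # no padded positions
    query_embed = torch.randn(n_queries, d)
    pos_embed = torch.randn(bs, seq_len, d)
    hs, memory = model(src, mask, query_embed, pos_embed)
    assert hs.shape == (n_dec_layers, bs, n_queries, d)    # (#layers, batch_size, #queries, d)
    assert memory.shape == (bs, seq_len, d)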
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
intermediate = []
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.return_intermediate:
intermediate.append(output)
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayerThin(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
# self.linear1 = nn.Linear(d_model, dim_feedforward)
# self.dropout = nn.Dropout(dropout)
# self.linear2 = nn.Linear(dim_feedforward, d_model)
self.linear = nn.Linear(d_model, d_model)
self.norm = nn.LayerNorm(d_model)
self.dropout = nn.Dropout(dropout)
# self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src2 = self.linear(src2)
src = src + self.dropout(src2)
src = self.norm(src)
# src = src + self.dropout1(src2)
# src = self.norm1(src)
# src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
# src = src + self.dropout2(src2)
# src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
"""not used"""
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
class TransformerDecoderLayerThin(nn.Module):
"""removed intermediate layer"""
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, d_model)
# self.linear1 = nn.Linear(d_model, dim_feedforward)
# self.dropout = nn.Dropout(dropout)
# self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
# self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
# self.dropout3 = nn.Dropout(dropout)
# self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt2 = self.linear1(tgt2)
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
# tgt = tgt + self.dropout2(tgt2)
# tgt = self.norm2(tgt)
# tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
# tgt = tgt + self.dropout3(tgt2)
# tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
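# Illustrative sketch (an assumption, not taken from the original repo): build_transformer
# only needs an object exposing the attributes read above, e.g. an argparse.Namespace.
#
#   import argparse
#   example_args = argparse.Namespace(hidden_dim=256, dropout=0.1, nheads=8,
#                                     dim_feedforward=2048, enc_layers=2,
#                                     dec_layers=2, pre_norm=False)
#   model = build_transformer(example_args)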
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
|
py | 1a495ba88924dfeddc933bcf0bce23aca6107394 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Pedro Algarvio ([email protected])`
:copyright: © 2012-2013 by the SaltStack Team, see AUTHORS for more details
:license: Apache 2.0, see LICENSE for more details.
tests.integration.shell.call
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import python libs
import os
import sys
import shutil
import yaml
from datetime import datetime
# Import Salt Testing libs
from salttesting import skipIf
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import salt libs
import integration
class CallTest(integration.ShellCase, integration.ShellCaseCommonTestsMixIn):
_call_binary_ = 'salt-call'
def test_default_output(self):
out = self.run_call('-l quiet test.fib 3')
expect = ['local:',
' |_',
' - 0',
' - 1',
' - 1',
' - 2']
self.assertEqual(expect, out[:-1])
def test_text_output(self):
out = self.run_call('-l quiet --out txt test.fib 3')
expect = [
'local: ([0, 1, 1, 2]'
]
self.assertEqual(''.join(expect), ''.join(out).rsplit(",", 1)[0])
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_user_delete_kw_output(self):
ret = self.run_call('-l quiet -d user.delete')
self.assertIn(
'salt \'*\' user.delete name remove=True force=True',
''.join(ret)
)
def test_issue_6973_state_highstate_exit_code(self):
'''
If there is no tops/master_tops or state file matches
for this minion, salt-call should exit non-zero if invoked with
option --retcode-passthrough
'''
src = os.path.join(integration.FILES, 'file/base/top.sls')
dst = os.path.join(integration.FILES, 'file/base/top.sls.bak')
shutil.move(src, dst)
expected_comment = 'No Top file or external nodes data matches found'
try:
stdout, retcode = self.run_call(
'-l quiet --retcode-passthrough state.highstate',
with_retcode=True
)
finally:
shutil.move(dst, src)
self.assertIn(expected_comment, ''.join(stdout))
self.assertNotEqual(0, retcode)
@skipIf(sys.platform.startswith('win'), 'This test does not apply on Win')
def test_issue_2731_masterless(self):
config_dir = '/tmp/salttest'
minion_config_file = os.path.join(config_dir, 'minion')
this_minion_key = os.path.join(
config_dir, 'pki', 'minions', 'minion_test_issue_2731'
)
minion_config = {
'id': 'minion_test_issue_2731',
'master': 'localhost',
'master_port': 64506,
'root_dir': '/tmp/salttest',
'pki_dir': 'pki',
'cachedir': 'cachedir',
'sock_dir': 'minion_sock',
'open_mode': True,
'log_file': '/tmp/salttest/minion_test_issue_2731',
'log_level': 'quiet',
'log_level_logfile': 'info'
}
# Remove existing logfile
if os.path.isfile('/tmp/salttest/minion_test_issue_2731'):
os.unlink('/tmp/salttest/minion_test_issue_2731')
start = datetime.now()
# Let's first test with a master running
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
)
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Calculate the required timeout, since next will fail.
# I needed this because after many attempts, I was unable to catch:
# WARNING: Master hostname: salt not found. Retrying in 30 seconds
        elapsed = datetime.now() - start
        timeout = elapsed.seconds + 3
# Now let's remove the master configuration
minion_config.pop('master')
minion_config.pop('master_port')
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
out = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=timeout,
)
try:
self.assertIn(
'Process took more than {0} seconds to complete. '
'Process Killed!'.format(timeout),
out
)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with --local
ret = self.run_script(
'salt-call',
'--config-dir {0} --local cmd.run "echo foo"'.format(
config_dir
),
timeout=15
)
try:
self.assertIn('local:', ret)
except AssertionError:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
raise
# Should work with local file client
minion_config['file_client'] = 'local'
open(minion_config_file, 'w').write(
yaml.dump(minion_config, default_flow_style=False)
)
ret = self.run_script(
'salt-call',
'--config-dir {0} cmd.run "echo foo"'.format(
config_dir
),
timeout=15
)
try:
self.assertIn('local:', ret)
finally:
if os.path.isfile(minion_config_file):
os.unlink(minion_config_file)
# Let's remove our key from the master
if os.path.isfile(this_minion_key):
os.unlink(this_minion_key)
if __name__ == '__main__':
from integration import run_tests
run_tests(CallTest)
|
py | 1a495c94d8bec71b1358d9563c9ee185c35306f9 | # Copyright (c) 2019-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
import pyecvl._core.ecvl as ecvl_core
import pyecvl.ecvl as ecvl_py
pytest.importorskip("pyecvl.augmentations")
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugmentationParam(ecvl):
ap = ecvl.AugmentationParam()
min_, max_ = 0.1, 1.2
ap = ecvl.AugmentationParam(min_, max_)
assert ap.min_ == pytest.approx(min_)
assert ap.max_ == pytest.approx(max_)
ecvl.AugmentationParam.SetSeed(12345)
ap.GenerateValue()
assert min_ <= ap.value_ <= max_
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugRotate(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugRotate([30, 50])
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3])
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1)
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1, ecvl.InterpolationType.nearest)
a.Apply(img)
a = ecvl.AugRotate([30, 50], [2, 3], 1.1, ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugRotate if ecvl is ecvl_core else ecvl.AugRotate.fromtext
a = f('angle=[30, 50] center=(2, 3) scale=1.1 interp="nearest" '
'gt_interp="linear"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugResizeDim(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugResizeDim([4, 3])
a.Apply(img)
a = ecvl.AugResizeDim([4, 3], ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugResizeDim if ecvl is ecvl_core else ecvl.AugResizeDim.fromtext
a = f('dims=(4, 3) interp="linear" gt_interp="nearest"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugResizeScale(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugResizeScale([0.5, 0.5])
a.Apply(img)
a = ecvl.AugResizeScale([0.5, 0.5], ecvl.InterpolationType.nearest,
ecvl.InterpolationType.linear)
a.Apply(img)
# fromtext
f = ecvl.AugResizeScale if ecvl is ecvl_core else \
ecvl.AugResizeScale.fromtext
a = f('scale=(0.5, 0.5) interp="linear" gt_interp="nearest"')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugFlip(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugFlip(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugFlip if ecvl is ecvl_core else ecvl.AugFlip.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugMirror(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugMirror(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugMirror if ecvl is ecvl_core else ecvl.AugMirror.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGaussianBlur(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGaussianBlur([0.2, 0.4])
a.Apply(img)
# fromtext
f = ecvl.AugGaussianBlur if ecvl is ecvl_core else \
ecvl.AugGaussianBlur.fromtext
a = f('sigma=[0.2, 0.4]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugAdditiveLaplaceNoise(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugAdditiveLaplaceNoise([255 * 0.05, 255 * 0.09])
a.Apply(img)
# fromtext
f = ecvl.AugAdditiveLaplaceNoise if ecvl is ecvl_core else \
ecvl.AugAdditiveLaplaceNoise.fromtext
a = f('std_dev=[12.5, 23.1]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugAdditivePoissonNoise(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugAdditivePoissonNoise([2.0, 3.0])
a.Apply(img)
# fromtext
f = ecvl.AugAdditivePoissonNoise if ecvl is ecvl_core else \
ecvl.AugAdditivePoissonNoise.fromtext
a = f('lambda=[2.0, 3.0]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGammaContrast(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGammaContrast([3, 4])
a.Apply(img)
# fromtext
f = ecvl.AugGammaContrast if ecvl is ecvl_core else \
ecvl.AugGammaContrast.fromtext
a = f('gamma=[3, 4]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugCoarseDropout(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugCoarseDropout([0.5, 0.7], [0.1, 0.2], 0.4)
a.Apply(img)
# fromtext
f = ecvl.AugCoarseDropout if ecvl is ecvl_core else \
ecvl.AugCoarseDropout.fromtext
a = f('p=[0.5, 0.7] drop_size=[0.1, 0.2] per_channel=0.4')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugTranspose(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugTranspose(0.5)
a.Apply(img)
# fromtext
f = ecvl.AugTranspose if ecvl is ecvl_core else ecvl.AugTranspose.fromtext
a = f('p=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugBrightness(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugBrightness([30, 60])
a.Apply(img)
# fromtext
f = ecvl.AugBrightness if ecvl is ecvl_core else \
ecvl.AugBrightness.fromtext
a = f('beta=[30, 60]')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugGridDistortion(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugGridDistortion([5, 10], [-0.2, 0.2])
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugGridDistortion(
[5, 10], [-0.2, 0.2], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugGridDistortion if ecvl is ecvl_core else \
ecvl.AugGridDistortion.fromtext
a = f('num_steps=[5,10] distort_limit=[-0.2,0.2] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugOpticalDistortion(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugOpticalDistortion([-0.2, 0.2], [-0.4, 0.4])
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugOpticalDistortion(
[-0.2, 0.2], [-0.4, 0.4], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugOpticalDistortion if ecvl is ecvl_core else \
ecvl.AugOpticalDistortion.fromtext
a = f('distort_limit=[-0.2,0.2] shift_limit=[-0.4,0.4] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugSalt(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugSalt([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugSalt if ecvl is ecvl_core else ecvl.AugSalt.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugPepper(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugPepper([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugPepper if ecvl is ecvl_core else ecvl.AugPepper.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugSaltAndPepper(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugSaltAndPepper([0.1, 0.3], 0.5)
a.Apply(img)
# fromtext
f = ecvl.AugSaltAndPepper if ecvl is ecvl_core else \
ecvl.AugSaltAndPepper.fromtext
a = f('p=[0.1,0.3] per_channel=0.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugElasticTransform(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugElasticTransform([34, 60], [4, 6])
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest
)
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101
)
a.Apply(img)
a = ecvl.AugElasticTransform(
[34, 60], [4, 6], ecvl.InterpolationType.nearest,
ecvl.BorderType.BORDER_REFLECT_101, 0
)
a.Apply(img)
# fromtext
f = ecvl.AugElasticTransform if ecvl is ecvl_core else \
ecvl.AugElasticTransform.fromtext
a = f('alpha=[34,60] sigma=[4,6] interp=\"linear\" '
'border_type=\"reflect_101\" border_value=0')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugNormalize(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugNormalize(20, 5.5)
a.Apply(img)
# fromtext
f = ecvl.AugNormalize if ecvl is ecvl_core else \
ecvl.AugNormalize.fromtext
a = f('mean=20 std=5.5')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugNormalize_separate(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugNormalize([20, 19, 21], [5, 5.5, 6])
a.Apply(img)
# fromtext
f = ecvl.AugNormalize if ecvl is ecvl_core else \
ecvl.AugNormalize.fromtext
a = f('mean=(20,19,21) std=(5,5.5,6)')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugCenterCrop(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugCenterCrop([4, 3])
a.Apply(img)
# fromtext
f = ecvl.AugCenterCrop if ecvl is ecvl_core else \
ecvl.AugCenterCrop.fromtext
a = f('size=(4,3)')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugToFloat32(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32()
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32(2.)
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugToFloat32(2., 3.)
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
# fromtext
f = ecvl.AugToFloat32 if ecvl is ecvl_core else \
ecvl.AugToFloat32.fromtext
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = f('divisor=2. divisor_gt=3.')
a.Apply(img)
assert img.elemtype_ == ecvl.DataType.float32
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugDivBy255(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugDivBy255()
a.Apply(img)
# fromtext
f = ecvl.AugDivBy255 if ecvl is ecvl_core else \
ecvl.AugDivBy255.fromtext
a = f('')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugScaleTo(ecvl):
img = ecvl.Image([8, 6, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
a = ecvl.AugScaleTo(1, 254)
a.Apply(img)
# fromtext
f = ecvl.AugScaleTo if ecvl is ecvl_core else \
ecvl.AugScaleTo.fromtext
a = f('new_min=1 new_max=255')
a.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_AugmentationFactory(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# one arg
a = ecvl.AugmentationFactory.create('AugFlip p=0.5')
a.Apply(img)
# two args
a = ecvl.AugmentationFactory.create('AugFlip', 'p=0.5')
a.Apply(img)
# container
txt = ('SequentialAugmentationContainer\n'
'AugRotate angle=[-5,5] center=(0,0) scale=0.5 interp="linear"\n'
'AugAdditiveLaplaceNoise std_dev=[0,0.51]\n'
'AugCoarseDropout p=[0,0.55] drop_size=[0.02,0.1] per_channel=0\n'
'AugAdditivePoissonNoise lambda=[0,40]\n'
'AugResizeDim dims=(30,30) interp="linear"\n'
'end')
c = ecvl.AugmentationFactory.create(txt)
c.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_SequentialAugmentationContainer(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# from list
c = ecvl.SequentialAugmentationContainer([
ecvl.AugRotate([-5, 5]),
ecvl.AugMirror(.5),
])
c.Apply(img)
# fromtext
txt = ('AugFlip p=0.2\n'
'AugMirror p=0.2\n'
'end')
f = ecvl.SequentialAugmentationContainer if ecvl is ecvl_core else \
ecvl.SequentialAugmentationContainer.fromtext
c = f(txt)
c.Apply(img)
@pytest.mark.parametrize("ecvl", [ecvl_core, ecvl_py])
def test_OneOfAugmentationContainer(ecvl):
img = ecvl.Image([5, 4, 3], ecvl.DataType.uint8, "xyc", ecvl.ColorType.BGR)
# from list
c = ecvl.OneOfAugmentationContainer(0.7, [
ecvl.AugRotate([-5, 5]),
ecvl.AugMirror(.5),
])
c.Apply(img)
# fromtext
txt = ('p=0.7\n'
'AugFlip p=0.2\n'
'AugMirror p=0.2\n'
'end')
f = ecvl.OneOfAugmentationContainer if ecvl is ecvl_core else \
ecvl.OneOfAugmentationContainer.fromtext
c = f(txt)
c.Apply(img)
|
py | 1a495cec59dabd17eb4146eab54a1be620885927 | import os
from torchnlp.download import download_file_maybe_extract
def wmt_dataset(directory='data/wmt16_en_de',
train=False,
dev=False,
test=False,
train_filename='train.tok.clean.bpe.32000',
dev_filename='newstest2013.tok.bpe.32000',
test_filename='newstest2014.tok.bpe.32000',
check_files=['train.tok.clean.bpe.32000.en'],
url='https://drive.google.com/uc?export=download&id=0B_bZck-ksdkpM25jRUN2X2UxMm8'):
"""
The Workshop on Machine Translation (WMT) 2014 English-German dataset.
Initially this dataset was preprocessed by Google Brain. Though this download contains test sets
from 2015 and 2016, the train set differs slightly from WMT 2015 and 2016 and significantly from
WMT 2017.
The provided data is mainly taken from version 7 of the Europarl corpus, which is freely
    available. Note that this is the same data as last year, since Europarl is no longer translated
    across all 23 official European languages. Additional training data is taken from the new News
Commentary corpus. There are about 50 million words of training data per language from the
Europarl corpus and 3 million words from the News Commentary corpus.
A new data resource from 2013 is the Common Crawl corpus which was collected from web sources.
Each parallel corpus comes with a annotation file that gives the source of each sentence pair.
References:
* https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/data_generators/translate_ende.py # noqa: E501
* http://www.statmt.org/wmt14/translation-task.html
Args:
directory (str, optional): Directory to cache the dataset.
train (bool, optional): If to load the training split of the dataset.
dev (bool, optional): If to load the dev split of the dataset.
test (bool, optional): If to load the test split of the dataset.
train_filename (str, optional): The filename of the training split.
dev_filename (str, optional): The filename of the dev split.
test_filename (str, optional): The filename of the test split.
        check_files (str, optional): Check if these files exist; if so, the download was successful.
url (str, optional): URL of the dataset `tar.gz` file.
Returns:
:class:`tuple` of :class:`iterable` or :class:`iterable`:
Returns between one and all dataset splits (train, dev and test) depending on if their
respective boolean argument is ``True``.
Example:
>>> from torchnlp.datasets import wmt_dataset # doctest: +SKIP
>>> train = wmt_dataset(train=True) # doctest: +SKIP
>>> train[:2] # doctest: +SKIP
[{
'en': 'Res@@ um@@ ption of the session',
'de': 'Wiederaufnahme der Sitzungsperiode'
}, {
'en': 'I declare resumed the session of the European Parliament ad@@ jour@@ ned on...'
'de': 'Ich erklär@@ e die am Freitag , dem 17. Dezember unterbro@@ ch@@ ene...'
}]
"""
download_file_maybe_extract(
url=url, directory=directory, check_files=check_files, filename='wmt16_en_de.tar.gz')
ret = []
splits = [(train, train_filename), (dev, dev_filename), (test, test_filename)]
splits = [f for (requested, f) in splits if requested]
for filename in splits:
examples = []
en_path = os.path.join(directory, filename + '.en')
de_path = os.path.join(directory, filename + '.de')
en_file = [l.strip() for l in open(en_path, 'r', encoding='utf-8')]
de_file = [l.strip() for l in open(de_path, 'r', encoding='utf-8')]
assert len(en_file) == len(de_file)
for i in range(len(en_file)):
if en_file[i] != '' and de_file[i] != '':
examples.append({'en': en_file[i], 'de': de_file[i]})
ret.append(examples)
if len(ret) == 1:
return ret[0]
else:
return tuple(ret)
|
py | 1a495d50f3ca0f3736175613ecf4993dbf2fdd15 | # -*- coding: utf-8 -*-
import rssit.config
import rssit.globals
def parse_args(args):
url = None
rssit.globals.config["config"]["args"] = {}
for arg in args[1:]:
if len(arg) > 0 and arg[0] == '/':
url = arg
continue
if "=" not in arg:
# ???
continue
eq = arg.index("=")
key = arg[:eq]
value = rssit.config.parse_value_simple(arg[eq + 1:])
rssit.globals.config["config"]["args"][key] = value
return url
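# Illustrative sketch (assumed input, not from the original project): for argv like
#   ['rssit', 'count=20', 'nocache=true', '/f/twitter/someuser']
# parse_args() stores the key=value pairs (parsed via rssit.config.parse_value_simple)
# under rssit.globals.config["config"]["args"] and returns the leading-slash URL
# '/f/twitter/someuser'; arguments without '=' and without a leading '/' are ignored.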
|
py | 1a495d6d903e306002501d23d6f53dbc1294069f | #!/usr/local/bin/python
"""
Xylinq schema setup for MySQL (table creation).
$Header: /home/inqwell/cvsroot/dev/scripts/python/setupMySqlXySchema.py,v 1.1 2009/05/22 22:15:51 sanderst Exp $
$Author: sanderst $
$DateTime: 2009/04/22 14:59:21 $
$Change: 164836 $
"""
import glob
from optparse import OptionParser
import os
from DBUtils.mysql_schema_setup import MySqlSchemaSetup
def main(args):
parser = OptionParser()
parser.add_option("--dbserver", dest="db_server", help="Database server")
parser.add_option("--db", dest="db", help="Database")
parser.add_option("--dbuser", dest="db_user", help="Database user")
parser.add_option("--dbpassword", dest="db_password", help="Database password")
parser.add_option("--dirs", dest="dir_list", help="List of directories for the SQL files")
parser.add_option("--no_fk", dest="do_foreign_keys", action="store_false", default=True,
help="Ignore foreign key constraints")
parser.add_option("--test", dest="test_mode", action="store_true", default=False,
help="Test mode")
options, dummy = parser.parse_args()
test_mode = options.test_mode
db_server = options.db_server
if db_server is None and not test_mode:
raise Exception("Missing mandatory 'dbserver' argument")
db = options.db
if db is None and not test_mode:
raise Exception("Missing mandatory 'db' argument")
db_user = options.db_user
if db_user is None and not test_mode:
raise Exception("Missing mandatory 'dbuser' argument")
db_password = options.db_password
if db_password is None and not test_mode:
raise Exception("Missing mandatory 'dbpassword' argument")
dir_list = options.dir_list
if dir_list is None:
raise Exception("Missing mandatory 'dirs' argument")
dir_list = [dir_path.strip() for dir_path in dir_list.split(",")]
do_foreign_keys = options.do_foreign_keys
# Get the table creation SQL texts
filenames = []
for dir in dir_list:
path = os.path.join(dir, "*.sql")
filenames.extend(glob.glob(path))
table_text_list = []
for filename in filenames:
fh = open(filename)
try:
table_text = fh.read()
finally:
fh.close()
table_text_list.append(table_text)
# Create the tables
if not test_mode:
import MySQLdb
dbh = MySQLdb.connect(host=db_server, db=db, user=db_user, passwd=db_password)
db_cursor = dbh.cursor()
else:
db_cursor = None
setup = MySqlSchemaSetup()
setup.create_tables(db_cursor, table_text_list, do_foreign_keys=do_foreign_keys,
test_mode=test_mode)
if not test_mode:
db_cursor.close()
dbh.commit()
dbh.close()
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv))
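# Illustrative invocation (all values are placeholders, not from the original project):
#   ./setupMySqlXySchema.py --dbserver localhost --db xy --dbuser xyadmin \
#       --dbpassword secret --dirs sql/core,sql/ext
# With --test no database connection is opened (db_cursor stays None) and table
# creation is only exercised in test mode; --no_fk skips foreign key constraints.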
|
py | 1a495def559d940cc0587821d978913272182b68 | import argparse
import json
import os
import subprocess
import sys
import time
import copy
root_account = "codex"
def system_account(sub_name):
return root_account + "." + sub_name
relay_account_name = system_account("relay")
token_account_name = system_account("token")
code_account_name = system_account("code")
match_account_name = system_account("match")
bridge_account_name = system_account("bridge")
relay_token_name = "relay.token"
class dataSet(object):
"""data for forceio script"""
def __init__(self):
self.initAccounts = []
self.initProducers = []
self.initProducerSigKeys = []
self.initAccountsKeys = []
self.maxClients = 0
self.logFile = {}
self.args = {}
def processData(self, args):
self.args = args
self.config_dir = os.path.abspath(args.data_dir) + '/config/'
self.nodes_dir = os.path.abspath(args.data_dir) + '/nodes/'
self.wallet_dir = os.path.abspath(args.data_dir) + '/wallet/'
self.log_path = os.path.abspath(args.data_dir) + '/' + args.log_path
self.contracts_dir = args.contracts_dir
run('mkdir -p ' + os.path.abspath(args.data_dir))
self.logFile = open(self.log_path, 'a')
self.logFile.write('\n\n' + '*' * 80 + '\n\n\n')
datas = dataSet()
def jsonArg(a):
return " '" + json.dumps(a) + "' "
def log2File(l):
print('forceio script:', l)
if datas.logFile != {}:
datas.logFile.write(l + '\n')
def run(args):
log2File(args)
if subprocess.call(args, shell=True):
print('bios-boot-eosforce.py: exiting because of error')
sys.exit(1)
def rm(path_to_del):
run('rm -rf ' + path_to_del)
def retry(args):
while True:
log2File(args)
if subprocess.call(args, shell=True):
print('*** Retry')
else:
break
def background(args):
log2File(args)
return subprocess.Popen(args, shell=True)
def sleep(t):
print('sleep', t, '...')
time.sleep(t)
print('resume')
def replaceFile(file, old, new):
try:
f = open(file,'r+')
all_lines = f.readlines()
f.seek(0)
f.truncate()
for line in all_lines:
line = line.replace(old, new)
f.write(line)
f.close()
    except Exception as e:
        print('bios-boot-eosforce.py: failed to replace %s from %s to %s' % (file, old, new))
print(e)
sys.exit(1)
def cleos(cmd):
run(datas.args.cleos + cmd)
def intToCurrency(i):
return '%d.%04d %s' % (i // 10000, i % 10000, datas.args.symbol)
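# For example, with the default symbol 'CDX', intToCurrency(1234567) -> '123.4567 CDX'
# (amounts are stored as integers with four decimal places of precision).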
def pushAction(account, action, auth, data ):
cleos("push action %s %s '%s' -p %s" % (account, action, data, auth))
def setFuncStartBlock(func_typ, num):
pushAction(root_account, "setconfig", "codex.config",
'{"typ":"%s","num":%s,"key":"","fee":"%s"}' % ( func_typ, num, intToCurrency(0)))
def setFee(account, act, fee, cpu, net, ram):
cleos(
'set setfee ' +
('%s %s ' % (account, act)) +
'"' + intToCurrency(fee) + '" ' +
('%d %d %d' % (cpu, net, ram)))
def getRAM(account, ram):
cleos("push action %s freeze '{\"voter\":\"%s\", \"stake\":\"%s\"}' -p %s" % (root_account, account, intToCurrency(ram), account))
cleos("push action %s vote4ram '{\"voter\":\"%s\",\"bpname\":\"codex.bpa\",\"stake\":\"%s\"}' -p %s" % (root_account, account, intToCurrency(ram), account))
def setContract(account):
getRAM(account, 50000 * 10000)
cleos('set contract %s %s/%s/' % (account, datas.config_dir, account))
def setContractByPath(account, path):
getRAM(account, 50000 * 10000)
cleos('set contract %s %s/%s/' % (account, datas.config_dir, path))
def parserArgsAndRun(parser, commands):
parser.add_argument('--root', metavar='', help="Eosforce root dir from git", default='../../')
parser.add_argument('--contracts-dir', metavar='', help="Path to contracts directory", default='tutorials/genesis/')
parser.add_argument('--log-path', metavar='', help="Path to log file", default='output.log')
parser.add_argument('--nodes-dir', metavar='', help="Path to nodes directory", default='./nodes/')
parser.add_argument('--wallet-dir', metavar='', help="Path to wallet directory", default='./wallet/')
parser.add_argument('--config-dir', metavar='', help="Path to config directory", default='./config')
parser.add_argument('--data-dir', metavar='', help="Path to datas", default='./testnet')
parser.add_argument('--symbol', metavar='', help="The core symbol", default='CDX')
parser.add_argument('--pr', metavar='', help="The Public Key Start Symbol", default='CDX')
parser.add_argument('-a', '--all', action='store_true', help="Do everything marked with (*)")
parser.add_argument('--use-port', metavar='', help="port X to listen, http X001-X099, p2p X101-X199 and wallet X666", default='8')
for (flag, command, function, inAll, help) in commands:
prefix = ''
if inAll: prefix += '*'
if prefix: help = '(' + prefix + ') ' + help
if flag:
parser.add_argument('-' + flag, '--' + command, action='store_true', help=help, dest=command)
else:
parser.add_argument('--' + command, action='store_true', help=help, dest=command)
args = parser.parse_args()
args.use_port = int(args.use_port)
if args.use_port >= 50 or args.use_port <= 4 :
print("args --use-port should between 5-50")
sys.exit(1)
args.cleos += ' --wallet-url http://127.0.0.1:%d666' % args.use_port
args.cleos += ' --url http://127.0.0.1:%d001 ' % args.use_port
args.cleos = args.root + args.cleos
args.nodeos = args.root + args.nodeos
args.keosd = args.root + args.keosd
args.contracts_dir = args.root + args.contracts_dir
global datas
datas.processData(args)
haveCommand = False
for (flag, command, function, inAll, help) in commands:
if getattr(args, command) or inAll and args.all:
if function:
haveCommand = True
function()
if not haveCommand:
print('bios-boot-eosforce.py: Tell me what to do. -a does almost everything. -h shows options.') |
py | 1a495e4af60a288b434c944de4c5e07123c6f00a | import sys
import gym.spaces
import itertools
import numpy as np
import random
import tensorflow as tf
import tensorflow.contrib.layers as layers
from collections import namedtuple
from dqn_utils import *
OptimizerSpec = namedtuple("OptimizerSpec", ["constructor", "kwargs", "lr_schedule"])
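# Illustrative sketch (an assumption about how callers build this, not taken from the
# original assignment code): constructor/kwargs describe a TensorFlow optimizer and
# lr_schedule is any schedule object from dqn_utils exposing value(t).
#
#   example_spec = OptimizerSpec(
#       constructor=tf.train.AdamOptimizer,
#       kwargs=dict(epsilon=1e-4),
#       lr_schedule=LinearSchedule(1000000, 1e-4),
#   )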
def learn(env,
q_func,
optimizer_spec,
session,
exploration=LinearSchedule(1000000, 0.1),
stopping_criterion=None,
replay_buffer_size=1000000,
batch_size=32,
gamma=0.99,
learning_starts=50000,
learning_freq=4,
frame_history_len=4,
target_update_freq=10000,
grad_norm_clipping=10):
"""Run Deep Q-learning algorithm.
You can specify your own convnet using q_func.
All schedules are w.r.t. total number of steps taken in the environment.
Parameters
----------
env: gym.Env
gym environment to train on.
q_func: function
Model to use for computing the q function. It should accept the
following named arguments:
img_in: tf.Tensor
tensorflow tensor representing the input image
num_actions: int
number of actions
scope: str
scope in which all the model related variables
should be created
reuse: bool
whether previously created variables should be reused.
optimizer_spec: OptimizerSpec
Specifying the constructor and kwargs, as well as learning rate schedule
for the optimizer
session: tf.Session
tensorflow session to use.
exploration: rl_algs.deepq.utils.schedules.Schedule
schedule for probability of chosing random action.
stopping_criterion: (env, t) -> bool
should return true when it's ok for the RL algorithm to stop.
takes in env and the number of steps executed so far.
replay_buffer_size: int
How many memories to store in the replay buffer.
batch_size: int
How many transitions to sample each time experience is replayed.
gamma: float
Discount Factor
learning_starts: int
After how many environment steps to start replaying experiences
learning_freq: int
How many steps of environment to take between every experience replay
frame_history_len: int
How many past frames to include as input to the model.
target_update_freq: int
How many experience replay rounds (not steps!) to perform between
each update to the target Q network
grad_norm_clipping: float or None
If not None gradients' norms are clipped to this value.
"""
assert type(env.observation_space) == gym.spaces.Box
assert type(env.action_space) == gym.spaces.Discrete
###############
# BUILD MODEL #
###############
if len(env.observation_space.shape) == 1:
# This means we are running on low-dimensional observations (e.g. RAM)
input_shape = env.observation_space.shape
else:
img_h, img_w, img_c = env.observation_space.shape
input_shape = (img_h, img_w, frame_history_len * img_c)
num_actions = env.action_space.n
# set up placeholders
# placeholder for current observation (or state)
obs_t_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for current action
act_t_ph = tf.placeholder(tf.int32, [None])
# placeholder for current reward
rew_t_ph = tf.placeholder(tf.float32, [None])
# placeholder for next observation (or state)
obs_tp1_ph = tf.placeholder(tf.uint8, [None] + list(input_shape))
# placeholder for end of episode mask
# this value is 1 if the next state corresponds to the end of an episode,
# in which case there is no Q-value at the next state; at the end of an
# episode, only the current state reward contributes to the target, not the
# next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
done_mask_ph = tf.placeholder(tf.float32, [None])
# casting to float on GPU ensures lower data transfer times.
obs_t_float = tf.cast(obs_t_ph, tf.float32) / 255.0
obs_tp1_float = tf.cast(obs_tp1_ph, tf.float32) / 255.0
# Here, you should fill in your own code to compute the Bellman error. This requires
# evaluating the current and next Q-values and constructing the corresponding error.
# TensorFlow will differentiate this error for you, you just need to pass it to the
# optimizer. See assignment text for details.
# Your code should produce one scalar-valued tensor: total_error
# This will be passed to the optimizer in the provided code below.
# Your code should also produce two collections of variables:
# q_func_vars
# target_q_func_vars
# These should hold all of the variables of the Q-function network and target network,
# respectively. A convenient way to get these is to make use of TF's "scope" feature.
# For example, you can create your Q-function network with the scope "q_func" like this:
# <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
# And then you can obtain the variables like this:
# q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
# Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
######
# YOUR CODE HERE
######
    q_net = q_func(obs_t_float, num_actions, scope='q_func', reuse=False)  # Q(s, a) for every action
    target_q_net = q_func(obs_tp1_float, num_actions, scope='target_q_func', reuse=False)
    q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
    target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')
    one_hot_act = tf.one_hot(act_t_ph, depth=num_actions, dtype=tf.float32, name="action_one_hot")
    # Q(s, a) for the action actually taken, and the bootstrapped target
    # r + gamma * max_a' Q_target(s', a'), with the bootstrap term masked out
    # at episode boundaries via done_mask_ph.
    q_t_acted = tf.reduce_sum(one_hot_act * q_net, axis=1)
    q_tp1_best = tf.reduce_max(target_q_net, axis=1)
    target = rew_t_ph + gamma * (1.0 - done_mask_ph) * q_tp1_best
    total_error = tf.reduce_mean(tf.square(target - q_t_acted))
# construct optimization op (with gradient clipping)
learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
optimizer = optimizer_spec.constructor(learning_rate=learning_rate, **optimizer_spec.kwargs)
train_fn = minimize_and_clip(optimizer, total_error,
var_list=q_func_vars, clip_val=grad_norm_clipping)
# update_target_fn will be called periodically to copy Q network to target Q network
update_target_fn = []
for var, var_target in zip(sorted(q_func_vars, key=lambda v: v.name),
sorted(target_q_func_vars, key=lambda v: v.name)):
update_target_fn.append(var_target.assign(var))
update_target_fn = tf.group(*update_target_fn)
# construct the replay buffer
replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len)
###############
# RUN ENV #
###############
model_initialized = False
num_param_updates = 0
mean_episode_reward = -float('nan')
best_mean_episode_reward = -float('inf')
last_obs = env.reset()
LOG_EVERY_N_STEPS = 10000
for t in itertools.count():
### 1. Check stopping criterion
if stopping_criterion is not None and stopping_criterion(env, t):
break
### 2. Step the env and store the transition
# At this point, "last_obs" contains the latest observation that was
# recorded from the simulator. Here, your code needs to store this
# observation and its outcome (reward, next observation, etc.) into
# the replay buffer while stepping the simulator forward one step.
# At the end of this block of code, the simulator should have been
# advanced one step, and the replay buffer should contain one more
# transition.
# Specifically, last_obs must point to the new latest observation.
# Useful functions you'll need to call:
# obs, reward, done, info = env.step(action)
# this steps the environment forward one step
# obs = env.reset()
# this resets the environment if you reached an episode boundary.
# Don't forget to call env.reset() to get a new observation if done
# is true!!
# Note that you cannot use "last_obs" directly as input
# into your network, since it needs to be processed to include context
# from previous frames. You should check out the replay buffer
# implementation in dqn_utils.py to see what functionality the replay
# buffer exposes. The replay buffer has a function called
# encode_recent_observation that will take the latest observation
# that you pushed into the buffer and compute the corresponding
# input that should be given to a Q network by appending some
# previous frames.
# Don't forget to include epsilon greedy exploration!
# And remember that the first time you enter this loop, the model
# may not yet have been initialized (but of course, the first step
# might as well be random, since you haven't trained your net...)
#####
# YOUR CODE HERE
#####
idx = replay_buffer.store_frame(last_obs)
eps = exploration.value(t)
if model_initialized and random.random() > eps:
q_input = replay_buffer.encode_recent_observation()
q_input = np.expand_dims(q_input,axis=0)
action = np.argmax(session.run(q_net,feed_dict = {obs_t_float:q_input}))
else:
action = env.action_space.sample() # exploration
last_obs, reward, done, info = env.step(action)
replay_buffer.store_effect(idx, action, reward, done)
if done:
last_obs = env.reset()
# at this point, the environment should have been advanced one step (and
# reset if done was true), and last_obs should point to the new latest
# observation
### 3. Perform experience replay and train the network.
# note that this is only done if the replay buffer contains enough samples
# for us to learn something useful -- until then, the model will not be
# initialized and random actions should be taken
if (t > learning_starts and
t % learning_freq == 0 and
replay_buffer.can_sample(batch_size)):
# Here, you should perform training. Training consists of four steps:
# 3.a: use the replay buffer to sample a batch of transitions (see the
# replay buffer code for function definition, each batch that you sample
# should consist of current observations, current actions, rewards,
# next observations, and done indicator).
# 3.b: initialize the model if it has not been initialized yet; to do
# that, call
# initialize_interdependent_variables(session, tf.global_variables(), {
# obs_t_ph: obs_t_batch,
# obs_tp1_ph: obs_tp1_batch,
# })
# where obs_t_batch and obs_tp1_batch are the batches of observations at
# the current and next time step. The boolean variable model_initialized
# indicates whether or not the model has been initialized.
# Remember that you have to update the target network too (see 3.d)!
# 3.c: train the model. To do this, you'll need to use the train_fn and
# total_error ops that were created earlier: total_error is what you
# created to compute the total Bellman error in a batch, and train_fn
# will actually perform a gradient step and update the network parameters
# to reduce total_error. When calling session.run on these you'll need to
# populate the following placeholders:
# obs_t_ph
# act_t_ph
# rew_t_ph
# obs_tp1_ph
# done_mask_ph
# (this is needed for computing total_error)
# learning_rate -- you can get this from optimizer_spec.lr_schedule.value(t)
# (this is needed by the optimizer to choose the learning rate)
# 3.d: periodically update the target network by calling
# session.run(update_target_fn)
# you should update every target_update_freq steps, and you may find the
# variable num_param_updates useful for this (it was initialized to 0)
#####
# YOUR CODE HERE
#####
obs_t_batch, act_batch, rew_batch, obs_tp1_batch, done_mask = replay_buffer.sample(batch_size)
if not model_initialized:
initialize_interdependent_variables(session, tf.global_variables(), {
obs_t_ph: obs_t_batch,
obs_tp1_ph: obs_tp1_batch,
})
model_initialized = True
session.run(train_fn, feed_dict={
obs_t_ph:obs_t_batch,
act_t_ph:act_batch,
rew_t_ph:rew_batch,
obs_tp1_ph:obs_tp1_batch,
done_mask_ph:done_mask,
learning_rate:optimizer_spec.lr_schedule.value(t)})
            num_param_updates += 1
            if num_param_updates % target_update_freq == 0:
                session.run(update_target_fn)
### 4. Log progress
episode_rewards = get_wrapper_by_name(env, "Monitor").get_episode_rewards()
if len(episode_rewards) > 0:
mean_episode_reward = np.mean(episode_rewards[-100:])
if len(episode_rewards) > 100:
best_mean_episode_reward = max(best_mean_episode_reward, mean_episode_reward)
if t % LOG_EVERY_N_STEPS == 0 and model_initialized:
print("Timestep %d" % (t,))
print("mean reward (100 episodes) %f" % mean_episode_reward)
print("best mean reward %f" % best_mean_episode_reward)
print("episodes %d" % len(episode_rewards))
print("exploration %f" % exploration.value(t))
print("learning_rate %f" % optimizer_spec.lr_schedule.value(t))
sys.stdout.flush()
|
py | 1a495e90298b3c31de54fdc499efed3a7e2bd152 | # -*- coding: utf-8 -*-
"""
Module with logic for the Environment sub-process
"""
__author__ = 'Samir Adrik'
__email__ = '[email protected]'
from source.util import Assertor, Tracking, Debugger
from .finn_environment_process import FinnEnvironmentProcess
from .engine import SubModel
class FinnEnvironmentSubModel(SubModel):
"""
Implementation of Handler for Environmental statistics
"""
@Tracking
def __init__(self, environmental_data: dict):
"""
Constructor / Instantiate the class.
Parameters
----------
environmental_data : dict
dict with family statistics
"""
Assertor.assert_data_types([environmental_data], [dict])
self.name = FinnEnvironmentSubModel.__name__
super().__init__(name=self.name, desc="Processing Finn Environmental Statistics")
self.environmental_data = environmental_data
@Debugger
def run(self):
"""
method for running the environmental data sub model
"""
environment_process = FinnEnvironmentProcess(self.environmental_data)
return environment_process.environment_statistics
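# Typical wiring (a sketch; the dict contents are hypothetical -- in practice
# environmental_data comes from the upstream Finn scraping/parsing step):
#   sub_model = FinnEnvironmentSubModel(environmental_data)
#   statistics = sub_model.run()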
|
py | 1a495f859c0012d2bb98ca6a15bda0d1eb3deb08 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
http://developer.openstack.org/api-ref-identity-v3.html#groups-v3
"""
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
class GroupsClient(rest_client.RestClient):
api_version = "v3"
def create_group(self, **kwargs):
"""Creates a group.
Available params: see http://developer.openstack.org/
api-ref-identity-v3.html#createGroup
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.post('groups', post_body)
self.expected_success(201, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def show_group(self, group_id):
"""Get group details."""
resp, body = self.get('groups/%s' % group_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def list_groups(self):
"""Lists the groups."""
resp, body = self.get('groups')
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def update_group(self, group_id, **kwargs):
"""Updates a group.
Available params: see http://developer.openstack.org/
api-ref-identity-v3.html#updateGroup
"""
post_body = json.dumps({'group': kwargs})
resp, body = self.patch('groups/%s' % group_id, post_body)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_group(self, group_id):
"""Delete a group."""
resp, body = self.delete('groups/%s' % str(group_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def add_group_user(self, group_id, user_id):
"""Add user into group."""
resp, body = self.put('groups/%s/users/%s' % (group_id, user_id),
None)
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def list_group_users(self, group_id):
"""List users in group."""
resp, body = self.get('groups/%s/users' % group_id)
self.expected_success(200, resp.status)
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
def delete_group_user(self, group_id, user_id):
"""Delete user in group."""
resp, body = self.delete('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp, body)
def check_group_user_existence(self, group_id, user_id):
"""Check user in group."""
resp, body = self.head('groups/%s/users/%s' % (group_id, user_id))
self.expected_success(204, resp.status)
return rest_client.ResponseBody(resp)
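# Example usage (a sketch; the auth_provider, service name and region are
# assumptions -- in Tempest these are normally supplied by the configured
# clients manager rather than constructed by hand):
#   groups_client = GroupsClient(auth_provider, 'identity', 'RegionOne')
#   group = groups_client.create_group(name='readers')['group']
#   groups_client.add_group_user(group['id'], user_id)
#   groups_client.delete_group(group['id'])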
|
py | 1a495fb2faa002a01410221b8ba2b30a0aac4f9e |
import sys
import inspect
from scenic import scenarioFromString
from scenic.core.simulators import DummySimulator, RejectSimulationException
import scenic.syntax.veneer as veneer
## Scene generation utilities
# Compilation
def compileScenic(code, removeIndentation=True, scenario=None):
if removeIndentation:
# to allow indenting code to line up with test function
code = inspect.cleandoc(code)
checkVeneerIsInactive()
scenario = scenarioFromString(code, scenario=scenario)
checkVeneerIsInactive()
return scenario
# Static scenes
def sampleScene(scenario, maxIterations=1):
return generateChecked(scenario, maxIterations)[0]
def sampleSceneFrom(code, maxIterations=1, scenario=None):
scenario = compileScenic(code, scenario=scenario)
return sampleScene(scenario, maxIterations=maxIterations)
def sampleEgo(scenario, maxIterations=1):
scene, iterations = generateChecked(scenario, maxIterations)
return scene.egoObject
def sampleEgoFrom(code, maxIterations=1):
scenario = compileScenic(code)
return sampleEgo(scenario, maxIterations=maxIterations)
def sampleParamP(scenario, maxIterations=1):
scene, iterations = generateChecked(scenario, maxIterations)
return scene.params['p']
def sampleParamPFrom(code, maxIterations=1):
scenario = compileScenic(code)
return sampleParamP(scenario, maxIterations=maxIterations)
# Dynamic simulations
def sampleEgoActions(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
singleAction=True, timestep=1):
allActions = sampleActions(scenario, maxIterations, maxSteps, maxScenes,
singleAction, asMapping=False, timestep=timestep)
return [actions[0] for actions in allActions]
def sampleEgoActionsFromScene(scene, maxIterations=1, maxSteps=1, singleAction=True, timestep=1):
allActions = sampleActionsFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
singleAction=singleAction, asMapping=False,
timestep=timestep)
if allActions is None:
return None
return [actions[0] for actions in allActions]
def sampleActions(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
singleAction=True, asMapping=False, timestep=1):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
actions = sampleActionsFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
singleAction=singleAction, asMapping=asMapping,
timestep=timestep)
if actions is not None:
return actions
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleActionsFromScene(scene, maxIterations=1, maxSteps=1,
singleAction=True, asMapping=False, timestep=1):
sim = DummySimulator(timestep=timestep)
simulation = sim.simulate(scene, maxSteps=maxSteps, maxIterations=maxIterations)
if not simulation:
return None
actionSequence = simulation.result.actions
if singleAction:
for i, allActions in enumerate(actionSequence):
for agent, actions in allActions.items():
assert len(actions) <= 1
allActions[agent] = actions[0] if actions else None
if asMapping:
return actionSequence
else:
return [tuple(actions.values()) for actions in actionSequence]
def sampleTrajectory(scenario, maxIterations=1, maxSteps=1, maxScenes=1,
raiseGuardViolations=False):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
trajectory = sampleTrajectoryFromScene(scene, maxIterations=maxIterations,
maxSteps=maxSteps,
raiseGuardViolations=raiseGuardViolations)
if trajectory is not None:
return trajectory
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleResult(scenario, maxIterations=1, maxSteps=1, maxScenes=1):
for i in range(maxScenes):
scene, iterations = generateChecked(scenario, maxIterations)
result = sampleResultFromScene(scene, maxIterations=maxIterations,
maxSteps=maxSteps)
if result is not None:
return result
raise RejectSimulationException(
f'unable to find successful simulation over {maxScenes} scenes')
def sampleResultFromScene(scene, maxIterations=1, maxSteps=1, raiseGuardViolations=False):
sim = DummySimulator(timestep=1)
simulation = sim.simulate(scene, maxSteps=maxSteps, maxIterations=maxIterations,
raiseGuardViolations=raiseGuardViolations)
if not simulation:
return None
return simulation.result
def sampleTrajectoryFromScene(scene, maxIterations=1, maxSteps=1, raiseGuardViolations=False):
result = sampleResultFromScene(scene, maxIterations=maxIterations, maxSteps=maxSteps,
raiseGuardViolations=raiseGuardViolations)
if not result:
return None
return result.trajectory
# Helpers
def generateChecked(scenario, maxIterations):
checkVeneerIsInactive()
scene, iterations = scenario.generate(maxIterations=maxIterations)
checkVeneerIsInactive()
return scene, iterations
def checkVeneerIsInactive():
assert veneer.activity == 0
assert not veneer.scenarioStack
assert not veneer.currentScenario
assert not veneer.evaluatingRequirement
assert not veneer.evaluatingGuard
assert not veneer.scenarios
assert not veneer._globalParameters
assert not veneer.lockedParameters
assert not veneer.lockedModel
assert not veneer.currentSimulation
assert not veneer.currentBehavior
## Error checking utilities
def checkErrorLineNumber(line, exc_info=None):
if exc_info is None:
tb = sys.exc_info()[2]
else:
tb = exc_info.tb
while tb.tb_next is not None:
tb = tb.tb_next
assert tb.tb_lineno == line
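# Example of how these helpers combine in a test (a sketch; the scenario source
# is hypothetical and assumes the classic Scenic syntax used in this test suite):
#   scenario = compileScenic("ego = Object at 0 @ 0")
#   ego = sampleEgo(scenario, maxIterations=10)
#   actions = sampleEgoActions(scenario, maxSteps=5)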
|
py | 1a496150de2f57d0a97ce0984e8866a08d0c482f | # Created by Ilia
# https://www.tensorflow.org/tutorials/keras/regression#the_auto_mpg_dataset
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental import preprocessing
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'
column_names = ['MPG', 'Cylinders', 'Displacement', 'Horsepower', 'Weight',
'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(url, names=column_names,
na_values='?', comment='\t',
sep=' ', skipinitialspace=True)
dataset = raw_dataset.copy()
print(dataset.tail())
dataset = dataset.dropna()
dataset['Origin'] = dataset['Origin'].map({1: 'USA', 2: 'Europe', 3: 'Japan'})
dataset = pd.get_dummies(dataset, prefix='', prefix_sep='')
print(dataset.tail())
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)
train_features = train_dataset.copy()
test_features = test_dataset.copy()
train_labels = train_features.pop('MPG')
test_labels = test_features.pop('MPG')
normalizer = preprocessing.Normalization()
normalizer.adapt(np.array(train_features))
def build_and_compile_model(norm):
model = keras.Sequential([
norm,
layers.Dense(64, activation='relu'),
layers.Dense(64, activation='relu'),
layers.Dense(1)
])
model.compile(loss='mean_absolute_error',
optimizer=tf.keras.optimizers.Adam(0.001))
return model
dnn_model = build_and_compile_model(normalizer)
dnn_model.summary()
history = dnn_model.fit(
train_features, train_labels,
validation_split=0.2,
verbose=0, epochs=100)
def plot_loss(history):
plt.plot(history.history['loss'], label='loss')
plt.plot(history.history['val_loss'], label='val_loss')
plt.ylim([0, 10])
plt.xlabel('Epoch')
plt.ylabel('Error [MPG]')
plt.legend()
plt.grid(True)
plot_loss(history)
plt.show()
dnn_model.evaluate(test_features, test_labels, verbose=0)
# Make Predictions
test_predictions = dnn_model.predict(test_features).flatten()
a = plt.axes(aspect='equal')
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
lims = [0, 50]
plt.xlim(lims)
plt.ylim(lims)
_ = plt.plot(lims, lims)
plt.show()
# Error distribution
error = test_predictions - test_labels
plt.hist(error, bins=25)
plt.xlabel('Prediction Error [MPG]')
_ = plt.ylabel('Count')
plt.show()
# Save model
# dnn_model.save('dnn_model')
# Load model
# reloaded = tf.keras.models.load_model('dnn_model')
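# Using the reloaded model (a sketch, assuming the save/load lines above were
# uncommented and run first):
# reloaded_predictions = reloaded.predict(test_features).flatten()
# print(np.mean(np.abs(reloaded_predictions - test_labels)))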
|
py | 1a49619fb2ff0f8b9ffe1d7aa7e8088d9b73d72e | import unittest
from unittest import mock
from io import StringIO
from snowfakery.data_generator import generate
class TestGenerateMapping(unittest.TestCase):
@mock.patch("snowfakery.output_streams.DebugOutputStream.write_row")
def test_empty_string(self, write_row):
yaml = """
- object: Foo
fields:
bar: ""
"""
generate(StringIO(yaml))
        assert write_row.mock_calls[0][1][1]["bar"] == ""
|
py | 1a49637af9c3a805dbc072d9e6fe7851f05767e7 | """Test accessor functions by ensuring accessor and xroms
functions return same values."""
import cartopy
import numpy as np
import xarray as xr
from xgcm import grid as xgrid
import xroms
grid1 = xr.open_dataset("xroms/tests/input/grid.nc")
# ds = xroms.open_netcdf('xroms/tests/input/ocean_his_0001.nc')
ds = xr.open_dataset("xroms/tests/input/ocean_his_0001.nc")
# combine the two:
ds = ds.merge(grid1, overwrite_vars=True, compat="override")
ds, grid = xroms.roms_dataset(ds)
axesTZYX = ["T", "Z", "Y", "X"]
axesTYX = ["T", "Y", "X"]
coordnamesTZYX = ["time", "vertical", "latitude", "longitude"]
coordnamesTYX = ["time", "latitude", "longitude"]
dim_dict = {
"rho": {
"s_rho": ["ocean_time", "s_rho", "eta_rho", "xi_rho"],
"s_w": ["ocean_time", "s_w", "eta_rho", "xi_rho"],
None: ["ocean_time", "eta_rho", "xi_rho"],
},
"u": {
"s_rho": ["ocean_time", "s_rho", "eta_rho", "xi_u"],
"s_w": ["ocean_time", "s_w", "eta_rho", "xi_u"],
None: ["ocean_time", "eta_rho", "xi_u"],
},
"v": {
"s_rho": ["ocean_time", "s_rho", "eta_v", "xi_rho"],
"s_w": ["ocean_time", "s_w", "eta_v", "xi_rho"],
None: ["ocean_time", "eta_v", "xi_rho"],
},
"psi": {
"s_rho": ["ocean_time", "s_rho", "eta_v", "xi_u"],
"s_w": ["ocean_time", "s_w", "eta_v", "xi_u"],
None: ["ocean_time", "eta_v", "xi_u"],
},
}
coord_dict = {
"rho": {
"s_rho": ["ocean_time", "z_rho", "lat_rho", "lon_rho"],
"s_w": ["ocean_time", "z_w", "lat_rho", "lon_rho"],
None: ["ocean_time", "lat_rho", "lon_rho"],
},
"u": {
"s_rho": ["ocean_time", "z_rho_u", "lat_u", "lon_u"],
"s_w": ["ocean_time", "z_w_u", "lat_u", "lon_u"],
None: ["ocean_time", "lat_u", "lon_u"],
},
"v": {
"s_rho": ["ocean_time", "z_rho_v", "lat_v", "lon_v"],
"s_w": ["ocean_time", "z_w_v", "lat_v", "lon_v"],
None: ["ocean_time", "lat_v", "lon_v"],
},
"psi": {
"s_rho": ["ocean_time", "z_rho_psi", "lat_psi", "lon_psi"],
"s_w": ["ocean_time", "z_w_psi", "lat_psi", "lon_psi"],
None: ["ocean_time", "lat_psi", "lon_psi"],
},
}
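# The two lookup tables above drive the cf-xarray checks below: for a given
# horizontal grid ("rho", "u", "v", "psi") and vertical coordinate ("s_rho",
# "s_w", or None) they list the expected dimension and coordinate names, e.g.
# (an illustrative lookup, values taken straight from the tables):
#   dim_dict["u"]["s_rho"]   -> ["ocean_time", "s_rho", "eta_rho", "xi_u"]
#   coord_dict["u"]["s_rho"] -> ["ocean_time", "z_rho_u", "lat_u", "lon_u"]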
def test_grid():
assert isinstance(ds.xroms.grid, xgrid.Grid)
def test_speed():
acc = ds.xroms.speed
assert np.allclose(acc, xroms.speed(ds.u, ds.v, grid))
# also check attributes
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
# cf-xarray: make sure all Axes and Coordinates available in output
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_KE():
s = xroms.speed(ds.u, ds.v, grid)
acc = ds.xroms.KE
assert np.allclose(acc, xroms.KE(ds.rho0, s))
# also check attributes
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
# cf-xarray: make sure all Axes and Coordinates available in output
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_uv_geostrophic():
acc = ds.xroms.ug
assert np.allclose(acc, xroms.uv_geostrophic(ds.zeta, ds.f, grid, which="xi"))
# also check attributes
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
# cf-xarray: make sure all Axes and Coordinates available in output
hcoord = "u"
scoord = None
dims = dim_dict[hcoord][scoord]
axes = axesTYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
acc = ds.xroms.vg
assert np.allclose(acc, xroms.uv_geostrophic(ds.zeta, ds.f, grid, which="eta"))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "v"
scoord = None
dims = dim_dict[hcoord][scoord]
axes = axesTYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_EKE():
acc = ds.xroms.EKE
xug, xvg = xroms.uv_geostrophic(ds.zeta, ds.f, grid, which="both")
assert np.allclose(acc, xroms.EKE(xug, xvg, grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = None
dims = dim_dict[hcoord][scoord]
axes = axesTYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_dudz():
acc = ds.xroms.dudz
assert np.allclose(acc, xroms.dudz(ds.u, grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "u"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_dvdz():
acc = ds.xroms.dvdz
assert np.allclose(acc, xroms.dvdz(ds.v, grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "v"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_vertical_shear():
xdudz = ds.xroms.dudz
xdvdz = ds.xroms.dvdz
acc = ds.xroms.vertical_shear
assert np.allclose(acc, xroms.vertical_shear(xdudz, xdvdz, grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_relative_vorticity():
acc = ds.xroms.vort
assert np.allclose(acc, 0)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "psi"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_ertel():
acc = ds.xroms.ertel
xsig0 = xroms.potential_density(ds.temp, ds.salt)
xbuoy = xroms.buoyancy(xsig0)
assert np.allclose(acc, xroms.ertel(xbuoy, ds.u, ds.v, ds.f, grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_w():
# VRX
pass
# acc = ds.xroms.w
# assert np.allclose(acc, xroms.w(ds.u, ds.v, grid))
# acc.name == acc.attrs['name']
# acc.attrs['grid'] == ds.xroms.grid
# items = ['T','X','Y','Z','longitude','latitude','vertical','time']
# assert set(items).issubset(acc.cf.get_valid_keys())
def test_omega():
# VRX
pass
# acc = ds.xroms.omega
# assert np.allclose(acc, xroms.omega(ds.u, ds.v, grid))
# acc.name == acc.attrs['name']
# acc.attrs['grid'] == ds.xroms.grid
# items = ['T','X','Y','Z','longitude','latitude','vertical','time']
# assert set(items).issubset(acc.cf.get_valid_keys())
def test_rho():
acc = ds.xroms.rho
assert np.allclose(acc, xroms.density(ds.temp, ds.salt, ds.z_rho))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_sig0():
acc = ds.xroms.sig0
assert np.allclose(acc, xroms.potential_density(ds.temp, ds.salt, 0))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_buoyancy():
acc = ds.xroms.buoyancy
xsig0 = xroms.potential_density(ds.temp, ds.salt)
assert np.allclose(acc, xroms.buoyancy(xsig0))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_rho"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_N2():
acc = ds.xroms.N2
xrho = xroms.density(ds.temp, ds.salt, ds.z_rho)
assert np.allclose(acc, xroms.N2(xrho, grid), equal_nan=True)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_M2():
acc = ds.xroms.M2
xrho = xroms.density(ds.temp, ds.salt, ds.z_rho)
assert np.allclose(acc, xroms.M2(xrho, grid), equal_nan=True)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_mld():
acc = ds.xroms.mld(thresh=0.03)
sig0 = xroms.potential_density(ds.temp, ds.salt, 0)
assert np.allclose(acc, xroms.mld(sig0, ds.h, ds.mask_rho), equal_nan=True)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
hcoord = "rho"
scoord = None
dims = dim_dict[hcoord][scoord]
axes = axesTYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_ddxi():
testvars = ["salt", "u", "v"]
for testvar in testvars:
acc = ds[testvar].xroms.ddxi()
assert np.allclose(acc, xroms.ddxi(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
if testvar == "salt":
hcoord = "u"
scoord = "s_w"
elif testvar == "u":
hcoord = "rho"
scoord = "s_w"
elif testvar == "v":
hcoord = "psi"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
acc = ds.xroms.ddxi(testvar)
assert np.allclose(acc, xroms.ddxi(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_ddeta():
testvars = ["salt", "u", "v"]
for testvar in testvars:
acc = ds[testvar].xroms.ddeta()
assert np.allclose(acc, xroms.ddeta(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
if testvar == "salt":
hcoord = "v"
scoord = "s_w"
elif testvar == "u":
hcoord = "psi"
scoord = "s_w"
elif testvar == "v":
hcoord = "rho"
scoord = "s_w"
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
acc = ds.xroms.ddeta(testvar)
assert np.allclose(acc, xroms.ddeta(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_ddz():
testvars = ["salt", "u", "v"]
for testvar in testvars:
acc = ds[testvar].xroms.ddz()
assert np.allclose(acc, xroms.ddz(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
dims = list(ds[testvar].dims)
axes = axesTZYX
coords = [ds[testvar].cf[coordname].name for coordname in coordnamesTZYX]
coordnames = coordnamesTZYX
# correct dim and coord in derivative direction
if grid.axes["Z"]._get_axis_coord(ds[testvar])[1] == "s_rho":
dims[1] = "s_w"
coords[1] = coords[1].replace("rho", "w")
else:
dims[1] = "s_rho"
coords[1] = coords[1].replace("w", "rho")
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
acc = ds.xroms.ddz(testvar)
assert np.allclose(acc, xroms.ddz(ds[testvar], grid))
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_to_grid():
testvars = ["salt", "u", "v"]
for testvar in testvars:
for scoord in ["s_w", "s_rho"]:
for hcoord in ["rho", "u", "v", "psi"]:
acc = ds[testvar].xroms.to_grid(hcoord=hcoord, scoord=scoord)
assert np.allclose(
acc, xroms.to_grid(ds[testvar], grid, hcoord=hcoord, scoord=scoord)
)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
dims = dim_dict[hcoord][scoord]
axes = axesTZYX
coords = coord_dict[hcoord][scoord]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
acc = ds.xroms.to_grid(testvar, hcoord=hcoord, scoord=scoord)
assert np.allclose(
acc, xroms.to_grid(ds[testvar], grid, hcoord=hcoord, scoord=scoord)
)
assert acc.name == acc.attrs["name"]
assert isinstance(acc.attrs["grid"], xgrid.Grid)
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_sel2d():
lon0, lat0 = -94.8, 28.0
testvars = ["salt", "u", "v"]
for testvar in testvars:
acc = ds[testvar].xroms.sel2d(lon0, lat0)
out = xroms.sel2d(
ds[testvar],
ds[testvar].cf["longitude"],
ds[testvar].cf["latitude"],
lon0,
lat0,
)
assert np.allclose(acc, out)
assert acc.name == testvar
assert isinstance(acc.attrs["grid"], xgrid.Grid)
dims = ds[testvar].dims
axes = axesTZYX
coords = [ds[testvar].cf[coordname].name for coordname in coordnamesTZYX]
coordnames = coordnamesTZYX
for ax, dim in zip(axes, dims):
assert acc.cf[ax].name == dim
for coordname, coord in zip(coordnames, coords):
assert acc.cf[coordname].name == coord
def test_argsel2d():
lon0, lat0 = -94.8, 28.0
testvars = ["salt", "u", "v"]
for testvar in testvars:
inds = ds[testvar].xroms.argsel2d(lon0, lat0)
outinds = xroms.argsel2d(
ds[testvar].cf["longitude"], ds[testvar].cf["latitude"], lon0, lat0
)
assert np.allclose(inds, outinds)
def test_gridmean():
testvars = ["salt", "u", "v"]
for testvar in testvars:
for axis in ["Z", "Y", "X"]:
var1 = ds[testvar].xroms.gridmean(axis)
var2 = xroms.gridmean(ds[testvar], grid, axis)
assert np.allclose(var1, var2)
def test_gridsum():
testvars = ["salt", "u", "v"]
for testvar in testvars:
for axis in ["Z", "Y", "X"]:
var1 = ds[testvar].xroms.gridsum(axis)
var2 = xroms.gridsum(ds[testvar], grid, axis)
assert np.allclose(var1, var2)
def test_interpll():
ie, ix = 2, 3
indexer = {"eta_rho": [ie], "xi_rho": [ix]}
testvars = ["salt", "u", "v"]
for testvar in testvars:
var1 = xroms.interpll(
ds[testvar], ds.lon_rho.isel(indexer), ds.lat_rho.isel(indexer)
)
var2 = ds[testvar].xroms.interpll(
ds.lon_rho.isel(indexer), ds.lat_rho.isel(indexer)
)
assert np.allclose(var1, var2)
def test_zslice():
testvars = ["salt", "u", "v"]
for testvar in testvars:
varin = ds[testvar]
depths = np.asarray(ds[testvar].cf["vertical"][0, :, 0, 0].values)
varout = xroms.isoslice(varin, depths, grid, axis="Z")
varcomp = ds[testvar].xroms.isoslice(depths, axis="Z")
assert np.allclose(
varout.cf.isel(T=0, Y=0, X=0), varcomp.cf.isel(T=0, Y=0, X=0)
)
|
py | 1a496382ae27ee52375fa60f0e8aa67995619555 | from django.contrib import admin
from .models import Category
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {'slug': ('category_name', )}
list_display = ('category_name', 'slug')
# Register your models here.
admin.site.register(Category, CategoryAdmin)
|
py | 1a4963a8e001bde595007ae2f6356cc9edce6298 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
class coinmarketcap(Exchange):
def describe(self):
return self.deep_extend(super(coinmarketcap, self).describe(), {
'id': 'coinmarketcap',
'name': 'CoinMarketCap',
'rateLimit': 10000,
'version': 'v1',
'countries': ['US'],
'has': {
'CORS': True,
'privateAPI': False,
'createOrder': False,
'createMarketOrder': False,
'createLimitOrder': False,
'cancelOrder': False,
'editOrder': False,
'fetchBalance': False,
'fetchOrderBook': False,
'fetchL2OrderBook': False,
'fetchOHLCV': False,
'fetchTrades': False,
'fetchTickers': True,
'fetchCurrencies': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/28244244-9be6312a-69ed-11e7-99c1-7c1797275265.jpg',
'api': {
'public': 'https://api.coinmarketcap.com',
'files': 'https://files.coinmarketcap.com',
'charts': 'https://graph.coinmarketcap.com',
},
'www': 'https://coinmarketcap.com',
'doc': 'https://coinmarketcap.com/api',
},
'requiredCredentials': {
'apiKey': False,
'secret': False,
},
'api': {
'files': {
'get': [
'generated/stats/global.json',
],
},
'graphs': {
'get': [
'currencies/{name}/',
],
},
'public': {
'get': [
'ticker/',
'ticker/{id}/',
'global/',
],
},
},
'currencyCodes': [
'AUD',
'BRL',
'CAD',
'CHF',
'CNY',
'EUR',
'GBP',
'HKD',
'IDR',
'INR',
'JPY',
'KRW',
'MXN',
'RUB',
'USD',
'BTC',
'ETH',
'LTC',
],
})
async def fetch_order_book(self, symbol, limit=None, params={}):
raise ExchangeError('Fetching order books is not supported by the API of ' + self.id)
def currency_code(self, base, name):
currencies = {
'ACChain': 'ACChain',
'AdCoin': 'AdCoin',
'BatCoin': 'BatCoin',
'Bitgem': 'Bitgem',
'BlazeCoin': 'BlazeCoin',
'BlockCAT': 'BlockCAT',
'Blocktrade Token': 'Blocktrade Token',
'Catcoin': 'Catcoin',
'CanYaCoin': 'CanYaCoin', # conflict with CAN(Content and AD Network)
'CryptoBossCoin': 'CryptoBossCoin', # conflict with CBC(CashBet Coin)
'Comet': 'Comet', # conflict with CMT(CyberMiles)
'CPChain': 'CPChain',
'CrowdCoin': 'CrowdCoin', # conflict with CRC CryCash
'Cryptaur': 'Cryptaur', # conflict with CPT = Contents Protocol https://github.com/ccxt/ccxt/issues/4920 and https://github.com/ccxt/ccxt/issues/6081
'Cubits': 'Cubits', # conflict with QBT(Qbao)
'DAO.Casino': 'DAO.Casino', # conflict with BET(BetaCoin)
'E-Dinar Coin': 'E-Dinar Coin', # conflict with EDR Endor Protocol and EDRCoin
'EDRcoin': 'EDRcoin', # conflict with EDR Endor Protocol and E-Dinar Coin
'ENTCash': 'ENTCash', # conflict with ENT(Eternity)
'FairCoin': 'FairCoin', # conflict with FAIR(FairGame) https://github.com/ccxt/ccxt/pull/5865
'Fabric Token': 'Fabric Token',
# 'GET Protocol': 'GET Protocol',
'Global Tour Coin': 'Global Tour Coin', # conflict with GTC(Game.com)
'GuccioneCoin': 'GuccioneCoin', # conflict with GCC(Global Cryptocurrency)
'HarmonyCoin': 'HarmonyCoin', # conflict with HMC(Hi Mutual Society)
'Harvest Masternode Coin': 'Harvest Masternode Coin', # conflict with HC(HyperCash)
'HOT Token': 'HOT Token',
'Hydro Protocol': 'Hydro Protocol', # conflict with HOT(Holo)
'Huncoin': 'Huncoin', # conflict with HNC(Helleniccoin)
'iCoin': 'iCoin',
'Infinity Economics': 'Infinity Economics', # conflict with XIN(Mixin)
'KingN Coin': 'KingN Coin', # conflict with KNC(Kyber Network)
'LiteBitcoin': 'LiteBitcoin', # conflict with LBTC(LightningBitcoin)
'Maggie': 'Maggie',
'Monarch': 'Monarch', # conflict with MyToken(MT)
'MTC Mesh Network': 'MTC Mesh Network', # conflict with MTC Docademic doc.com Token https://github.com/ccxt/ccxt/issues/6081 https://github.com/ccxt/ccxt/issues/3025
'IOTA': 'IOTA', # a special case, most exchanges list it as IOTA, therefore we change just the Coinmarketcap instead of changing them all
'NetCoin': 'NetCoin',
'PCHAIN': 'PCHAIN', # conflict with PAI(Project Pai)
'Plair': 'Plair', # conflict with PLA(PLANET)
'PlayChip': 'PlayChip', # conflict with PLA(PLANET)
'Polcoin': 'Polcoin',
'PutinCoin': 'PutinCoin', # conflict with PUT(Profile Utility Token)
'Rcoin': 'Rcoin', # conflict with RCN(Ripio Credit Network)
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'Themis': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'Menlo One': 'Menlo One', # conflict with Harmony(ONE)
'BigONE Token': 'BigONE Token', # conflict with Harmony(ONE)
}
return self.safe_value(currencies, name, base)
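        # e.g. currency_code('HOT', 'Hydro Protocol') -> 'Hydro Protocol',
        # while currency_code('HOT', 'Holo') falls through and returns the base 'HOT'.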
async def fetch_markets(self, params={}):
request = {
'limit': 0,
}
response = await self.publicGetTicker(self.extend(request, params))
result = []
for i in range(0, len(response)):
market = response[i]
currencies = self.currencyCodes
for j in range(0, len(currencies)):
quote = currencies[j]
quoteId = quote.lower()
baseId = market['id']
base = self.currency_code(market['symbol'], market['name'])
symbol = base + '/' + quote
id = baseId + '/' + quoteId
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'active': None,
'precision': self.precision,
'limits': self.limits,
})
return result
async def fetch_global(self, currency='USD'):
await self.load_markets()
request = {}
if currency:
request['convert'] = currency
return await self.publicGetGlobal(request)
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_timestamp(ticker, 'last_updated')
if timestamp is None:
timestamp = self.milliseconds()
change = self.safe_float(ticker, 'percent_change_24h')
last = None
symbol = None
volume = None
if market is not None:
symbol = market['symbol']
priceKey = 'price_' + market['quoteId']
last = self.safe_float(ticker, priceKey)
volumeKey = '24h_volume_' + market['quoteId']
volume = self.safe_float(ticker, volumeKey)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': change,
'average': None,
'baseVolume': None,
'quoteVolume': volume,
'info': ticker,
}
async def fetch_tickers(self, currency='USD', params={}):
await self.load_markets()
request = {
'limit': 10000,
}
if currency:
request['convert'] = currency
response = await self.publicGetTicker(self.extend(request, params))
result = {}
for t in range(0, len(response)):
ticker = response[t]
currencyId = currency.lower()
id = ticker['id'] + '/' + currencyId
symbol = id
market = None
if id in self.markets_by_id:
market = self.markets_by_id[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'convert': market['quote'],
'id': market['baseId'],
}
response = await self.publicGetTickerId(self.extend(request, params))
ticker = response[0]
return self.parse_ticker(ticker, market)
async def fetch_currencies(self, params={}):
request = {
'limit': 0,
}
response = await self.publicGetTicker(self.extend(request, params))
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'symbol')
name = self.safe_string(currency, 'name')
# todo: will need to rethink the fees
# to add support for multiple withdrawal/deposit methods and
# differentiated fees for each particular method
precision = 8 # default precision, todo: fix "magic constants"
code = self.currency_code(id, name)
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': True,
'fee': None, # todo: redesign
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'price': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'cost': {
'min': None,
'max': None,
},
'withdraw': {
'min': None,
'max': None,
},
},
}
return result
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api] + '/' + self.version + '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if query:
url += '?' + self.urlencode(query)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
if 'error' in response:
if response['error']:
raise ExchangeError(self.id + ' ' + self.json(response))
return response
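# Example usage (a sketch; this is the async_support variant, so it has to run
# inside an asyncio event loop and the connection should be closed afterwards):
#   exchange = coinmarketcap()
#   tickers = await exchange.fetch_tickers('USD')
#   await exchange.close()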
|